-
Notifications
You must be signed in to change notification settings - Fork 4
61 lines (52 loc) · 1.69 KB
/
llamacpp.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
name: Build llama.cpp in CANN container

# Triggers: manual dispatch, PRs touching this workflow or requirements,
# and a nightly run at midnight UTC.
on:
  workflow_dispatch:
  pull_request:
    paths:
      - '.github/workflows/llamacpp.yaml'
      - 'requirements/**'
  schedule:
    - cron: "0 0 * * *"

# Cancel an in-flight run when a newer one starts for the same ref.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
# declared as "shell: bash -el {0}" on steps that need to be properly activated.
# It's used to activate ascend-toolkit environment variables.
defaults:
  run:
    shell: bash -el {0}

jobs:
  openeuler-arm64-test:
    # Run on PRs only when the 'Ascend NPU' label is present; always run
    # for workflow_dispatch and schedule events.
    if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }}
    name: Build llama.cpp on OpenEuler for Arm64
    runs-on: ubuntu-24.04-arm
    strategy:
      matrix:
        cann:
          - '8.0.rc3.beta1-910b-openeuler22.03-py3.10'
        device:
          - 'ascend910b3'
        build:
          - 'Release'
    # Build inside the Ascend CANN toolkit container matching the matrix entry.
    container: ascendai/cann:${{ matrix.cann }}
    steps:
      - name: Install dependencies
        run: |
          yum update -y
          yum install -y git gcc gcc-c++ make cmake
      - name: Checkout
        uses: actions/checkout@v4
      - name: Checkout llama.cpp
        uses: actions/checkout@v4
        with:
          repository: ggerganov/llama.cpp
          path: llama.cpp
      - name: Build llama.cpp
        working-directory: llama.cpp
        run: |
          mkdir build
          cd build
          export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH}
          cmake .. -DCMAKE_BUILD_TYPE=${{ matrix.build }} -DGGML_CANN=on -DSOC_TYPE=${{ matrix.device }}
          cmake --build . -j $(nproc)