setup.py
# -*- coding: utf-8 -*-

import ast
import os
import re
from pathlib import Path

from setuptools import find_packages, setup

# Use the README as the long description shown on PyPI.
with open('README.md') as f:
    long_description = f.read()


def get_package_version():
    # Read __version__ from fla/__init__.py without importing the package.
    with open(Path(os.path.dirname(os.path.abspath(__file__))) / 'fla' / '__init__.py') as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    return ast.literal_eval(version_match.group(1))


setup(
    name='fla',
    version=get_package_version(),
    description='Fast Triton-based implementations of causal linear attention',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Songlin Yang, Yu Zhang',
    url='https://github.com/fla-org/flash-linear-attention',
    packages=find_packages(),
    license='MIT',
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Scientific/Engineering :: Artificial Intelligence'
    ],
    python_requires='>=3.7',
    install_requires=[
        'torch>=2.3',
        'transformers>=4.45.0',
        'triton>=3.0',
        'datasets>=3.1.0',
        'einops',
        'ninja'
    ],
    extras_require={
        'conv1d': ['causal-conv1d>=1.4.0']
    }
)
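
For reference, here is a minimal, self-contained sketch of the version-extraction pattern that get_package_version() relies on. The sample string below is an illustrative stand-in for the contents of fla/__init__.py, not the package's actual file:

import ast
import re

# Illustrative stand-in for fla/__init__.py; the real file may differ.
sample = "__version__ = '0.1.0'\n"

# Same pattern as in setup.py: locate the __version__ assignment
# and evaluate its right-hand side as a Python literal.
match = re.search(r"^__version__\s*=\s*(.*)$", sample, re.MULTILINE)
print(ast.literal_eval(match.group(1)))  # prints: 0.1.0

Installing from this file follows the usual pip workflow, e.g. pip install . from the repository root; the conv1d extra declared in extras_require (pip install '.[conv1d]') additionally pulls in causal-conv1d.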