test fd_array_cnt_v4
Signed-off-by: Anton Protopopov <[email protected]>
aspsk committed Dec 3, 2024
1 parent 525b4ba commit 7c23bdb
Showing 12 changed files with 524 additions and 83 deletions.
17 changes: 17 additions & 0 deletions include/linux/bpf.h
@@ -2301,6 +2301,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);

/*
 * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
 * descriptor and return the corresponding map or BTF object.
 * Their names are double-underscored to emphasize that they do not
 * increase refcnt. To also increase refcnt, use the corresponding
 * bpf_map_get() and btf_get_by_fd() functions.
 */

static inline struct bpf_map *__bpf_map_get(struct fd f)
{
if (fd_empty(f))
@@ -2310,6 +2318,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
return fd_file(f)->private_data;
}

static inline struct btf *__btf_get_by_fd(struct fd f)
{
if (fd_empty(f))
return ERR_PTR(-EBADF);
if (unlikely(fd_file(f)->f_op != &btf_fops))
return ERR_PTR(-EINVAL);
return fd_file(f)->private_data;
}

void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
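A minimal sketch of the borrow-only pattern for callers (illustrative, not part of this commit: peek_map_type() is a hypothetical helper; the CLASS(fd, ...) guard drops the fd reference at scope exit):

static int peek_map_type(int ufd, enum bpf_map_type *type)
{
	CLASS(fd, f)(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);		/* borrowed: refcnt not increased */
	if (IS_ERR(map))
		return PTR_ERR(map);

	*type = map->map_type;		/* valid only while f is in scope */
	return 0;
}

With bpf_map_get(ufd) instead, the caller would own a reference that outlives the fd scope and must eventually be dropped with bpf_map_put().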
2 changes: 2 additions & 0 deletions include/linux/btf.h
@@ -4,6 +4,7 @@
#ifndef _LINUX_BTF_H
#define _LINUX_BTF_H 1

#include <linux/file.h>
#include <linux/types.h>
#include <linux/bpfptr.h>
#include <linux/bsearch.h>
@@ -143,6 +144,7 @@ void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
const struct btf_header *btf_header(const struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);

struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,
10 changes: 10 additions & 0 deletions include/uapi/linux/bpf.h
@@ -1573,6 +1573,16 @@ union bpf_attr {
* If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 prog_token_fd;
/* The fd_array_cnt field can be used to pass the length of the
 * fd_array array. In this case all the map file descriptors
 * passed in this array will be bound to the program, even if
 * the maps are not referenced directly. The functionality is
 * similar to the BPF_PROG_BIND_MAP syscall, but the maps can
 * be used by the verifier during the program load. If provided,
 * fd_array[0,...,fd_array_cnt-1] is expected to be contiguous.
 */
__u32 fd_array_cnt;
};

struct { /* anonymous struct used by BPF_OBJ_* commands */
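A hedged userspace sketch of the extended load API (illustrative helper, not from this patch; it assumes the caller already built the instruction buffer and the fds array, and uses the raw bpf(2) syscall rather than libbpf):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_prog_with_fd_array(const struct bpf_insn *insns, __u32 insn_cnt,
				   const int *fds, __u32 fd_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	/* fds[0..fd_cnt-1] must be a contiguous array of map/BTF fds; every
	 * map in it gets bound to the program, referenced by insns or not */
	attr.fd_array = (__u64)(unsigned long)fds;
	attr.fd_array_cnt = fd_cnt;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}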
13 changes: 4 additions & 9 deletions kernel/bpf/btf.c
@@ -7743,17 +7743,12 @@ int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)

struct btf *btf_get_by_fd(int fd)
{
struct btf *btf;
CLASS(fd, f)(fd);
struct btf *btf;

if (fd_empty(f))
return ERR_PTR(-EBADF);

if (fd_file(f)->f_op != &btf_fops)
return ERR_PTR(-EINVAL);

btf = fd_file(f)->private_data;
refcount_inc(&btf->refcnt);
btf = __btf_get_by_fd(f);
if (!IS_ERR(btf))
refcount_inc(&btf->refcnt);

return btf;
}
6 changes: 5 additions & 1 deletion kernel/bpf/core.c
@@ -539,14 +539,18 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
int err;

/* Branch offsets can't overflow when program is shrinking, no need
* to call bpf_adj_branches(..., true) here
*/
memmove(prog->insnsi + off, prog->insnsi + off + cnt,
sizeof(struct bpf_insn) * (prog->len - off - cnt));
prog->len -= cnt;

return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
err = bpf_adj_branches(prog, off, off + cnt, off, false);
WARN_ON_ONCE(err);
return err;
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
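The hunk above is a subtle correctness fix: WARN_ON_ONCE(x) evaluates to the truth value of x, so the old return WARN_ON_ONCE(bpf_adj_branches(...)) returned 0 or 1 rather than the negative errno. A self-contained userspace analogue (WARN_LIKE is a simplified stand-in; unlike the real macro it warns on every call):

#include <stdio.h>

#define WARN_LIKE(cond) \
	({ int __c = !!(cond); if (__c) fprintf(stderr, "warning\n"); __c; })

static int do_work(void) { return -12; /* pretend -ENOMEM */ }

static int old_style(void)
{
	return WARN_LIKE(do_work());	/* returns 1: the errno is lost */
}

static int new_style(void)
{
	int err = do_work();

	WARN_LIKE(err);			/* warn ... */
	return err;			/* ... but keep -ENOMEM */
}

int main(void)
{
	printf("old=%d new=%d\n", old_style(), new_style()); /* old=1 new=-12 */
	return 0;
}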
2 changes: 1 addition & 1 deletion kernel/bpf/syscall.c
@@ -2730,7 +2730,7 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_token_fd
#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt

static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
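Bumping BPF_PROG_LOAD_LAST_FIELD matters because the syscall rejects a load whose attr carries non-zero bytes past the last field it knows about. A simplified standalone sketch of that check (the real one is the CHECK_ATTR() macro in kernel/bpf/syscall.c; this form is illustrative only):

#include <linux/bpf.h>
#include <linux/string.h>
#include <linux/types.h>

static bool prog_load_trailing_is_zero(const union bpf_attr *attr)
{
	/* all bytes after fd_array_cnt must be zero, else the load fails */
	const char *end = (const char *)&attr->fd_array_cnt +
			  sizeof(attr->fd_array_cnt);
	size_t trailing = sizeof(*attr) - (end - (const char *)attr);

	return memchr_inv(end, 0, trailing) == NULL;
}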
193 changes: 126 additions & 67 deletions kernel/bpf/verifier.c
@@ -19064,6 +19064,12 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
}
}

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}

static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map,
struct bpf_prog *prog)
@@ -19142,46 +19148,58 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}

return 0;
}
if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
return -EBUSY;
}

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
if (map->map_type == BPF_MAP_TYPE_ARENA) {
if (env->prog->aux->arena) {
verbose(env, "Only one arena per program\n");
return -EBUSY;
}
if (!env->allow_ptr_leaks || !env->bpf_capable) {
verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
return -EPERM;
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
return -EINVAL;
}
}

return 0;
}

/* Add map behind fd to used maps list, if it's not already there, and return
* its index. Also set *reused to true if this map was already in the list of
* used maps.
* Returns <0 on error, or >= 0 index, on success.
*/
static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reused)
static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map)
{
CLASS(fd, f)(fd);
struct bpf_map *map;
int i;

map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
return PTR_ERR(map);
}
int i, err;

/* check whether we recorded this map already */
for (i = 0; i < env->used_map_cnt; i++) {
if (env->used_maps[i] == map) {
*reused = true;
for (i = 0; i < env->used_map_cnt; i++)
if (env->used_maps[i] == map)
return i;
}
}

if (env->used_map_cnt >= MAX_USED_MAPS) {
verbose(env, "The total number of maps per program has reached the limit of %u\n",
MAX_USED_MAPS);
return -E2BIG;
}

err = check_map_prog_compatibility(env, map, env->prog);
if (err)
return err;

if (env->prog->sleepable)
atomic64_inc(&map->sleepable_refcnt);

@@ -19192,12 +19210,29 @@ static int add_used_map_from_fd(struct bpf_verifier_env *env, int fd, bool *reus
*/
bpf_map_inc(map);

*reused = false;
env->used_maps[env->used_map_cnt++] = map;

return env->used_map_cnt - 1;
}

/* Add map behind fd to used maps list, if it's not already there, and return
* its index.
* Returns <0 on error, or >= 0 index, on success.
*/
static int add_used_map(struct bpf_verifier_env *env, int fd)
{
struct bpf_map *map;
CLASS(fd, f)(fd);

map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
return PTR_ERR(map);
}

return __add_used_map(env, map);
}

/* find and rewrite pseudo imm in ld_imm64 instructions:
*
* 1. if it accesses map FD, replace it with actual map pointer.
@@ -19229,7 +19264,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
int map_idx;
u64 addr;
u32 fd;
bool reused;

if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@@ -19290,18 +19324,14 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
break;
}

map_idx = add_used_map_from_fd(env, fd, &reused);
map_idx = add_used_map(env, fd);
if (map_idx < 0)
return map_idx;
map = env->used_maps[map_idx];

aux = &env->insn_aux_data[i];
aux->map_index = map_idx;

err = check_map_prog_compatibility(env, map, env->prog);
if (err)
return err;

if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
@@ -19332,39 +19362,6 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
insn[0].imm = (u32)addr;
insn[1].imm = addr >> 32;

/* proceed with extra checks only if its newly added used map */
if (reused)
goto next_insn;

if (bpf_map_is_cgroup_storage(map) &&
bpf_cgroup_storage_assign(env->prog->aux, map)) {
verbose(env, "only one cgroup storage of each type is allowed\n");
return -EBUSY;
}
if (map->map_type == BPF_MAP_TYPE_ARENA) {
if (env->prog->aux->arena) {
verbose(env, "Only one arena per program\n");
return -EBUSY;
}
if (!env->allow_ptr_leaks || !env->bpf_capable) {
verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
return -EPERM;
}
if (!env->prog->jit_requested) {
verbose(env, "JIT is required to use arena\n");
return -EOPNOTSUPP;
}
if (!bpf_jit_supports_arena()) {
verbose(env, "JIT doesn't support arena\n");
return -EOPNOTSUPP;
}
env->prog->aux->arena = (void *)map;
if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
verbose(env, "arena's user address must be set via map_extra or mmap()\n");
return -EINVAL;
}
}

next_insn:
insn++;
i++;
@@ -22535,6 +22532,65 @@ struct btf *bpf_get_btf_vmlinux(void)
return btf_vmlinux;
}

/*
 * add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In
 * this case every file descriptor in the array is expected to be either a
 * map or a BTF; everything else is considered to be trash.
 */
static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
{
struct bpf_map *map;
CLASS(fd, f)(fd);
int ret;

map = __bpf_map_get(f);
if (!IS_ERR(map)) {
ret = __add_used_map(env, map);
if (ret < 0)
return ret;
return 0;
}

/*
 * Unlike "unused" maps, which do not appear in the BPF program,
 * BTFs are visible, so there is no reason to refcnt them now.
 */
if (!IS_ERR(__btf_get_by_fd(f)))
return 0;

verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
return PTR_ERR(map);
}

static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr)
{
size_t size = sizeof(int);
int ret;
int fd;
u32 i;

env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);

/*
 * The only difference between the old (no fd_array_cnt given) and new
 * APIs is that in the latter case the fd_array is expected to be
 * contiguous and is scanned for map fds right away.
 */
if (!attr->fd_array_cnt)
return 0;

for (i = 0; i < attr->fd_array_cnt; i++) {
if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size))
return -EFAULT;

ret = add_fd_from_fd_array(env, fd);
if (ret)
return ret;
}

return 0;
}

int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
u64 start_time = ktime_get_ns();
@@ -22566,7 +22622,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);

env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
@@ -22589,6 +22644,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret)
goto err_unlock;

ret = process_fd_array(env, attr, uattr);
if (ret)
goto err_release_maps;

mark_verifier_state_clean(env);

if (IS_ERR(btf_vmlinux)) {
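The commit's remaining files (not rendered above) presumably carry the selftests. As a hedged end-to-end illustration of what the verifier changes enable, a map passed only via fd_array/fd_array_cnt should appear among the program's bound maps even though no instruction references it (prog_binds_one_map() is a hypothetical userspace check, not code from this patch):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_binds_one_map(int prog_fd)
{
	struct bpf_prog_info info;
	union bpf_attr attr;
	__u32 map_id = 0;

	memset(&info, 0, sizeof(info));
	info.nr_map_ids = 1;
	info.map_ids = (__u64)(unsigned long)&map_id;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return 0;

	/* exactly one bound map expected, despite no map-referencing insns */
	return info.nr_map_ids == 1 && map_id != 0;
}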