/*
   If a small request, check regular bin.  Since these "smallbins"
   hold one size each, no searching within bins is necessary.
   (For a large request, we need to wait until unsorted chunks are
   processed to find best fit. But for small ones, fits are exact
   anyway, so we can check now, which is faster.)
 */

if (in_smallbin_range (nb))
  {
    /* Map the normalized request size to its small-bin index and
       fetch that bin's header.  */
    idx = smallbin_index (nb);
    bin = bin_at (av, idx);

    /* last (bin) follows the bin's bk pointer; if it points back at the
       bin header the bin is empty.  Otherwise 'victim' is the chunk at
       the tail of the bin (allocation takes from the tail).  */
    if ((victim = last (bin)) != bin)
      {
        /* 'bck' is the second-to-last chunk — victim's predecessor.  */
        bck = victim->bk;
        /* Integrity check (branch-prediction hinted as unlikely): the
           predecessor's fd must point back at victim, i.e. the doubly
           linked list must be consistent; otherwise abort.  */
        if (__glibc_unlikely (bck->fd != victim))
          malloc_printerr ("malloc(): smallbin double linked list corrupted");
        set_inuse_bit_at_offset (victim, nb);
        /* Unlink 'victim' from the bin: make 'bck' the new tail and
           point its fd back at the bin header.  */
        bin->bk = bck;
        bck->fd = bin;

        if (av != &main_arena)
          set_non_main_arena (victim);
        check_malloced_chunk (av, victim, nb);

        /*
           While we're here, if we see other chunks of the same size,
           stash them in the tcache.
         */
        size_t tc_idx = csize2tidx (nb);
        if (tcache && tc_idx < mp_.tcache_bins)
          {
            mchunkptr tc_victim;

            /* While bin not empty and tcache not full, copy chunks over
               (the "stashing" mechanism): keep taking the chunk at the
               tail of the small bin into tc_victim.  */
            while (tcache->counts[tc_idx] < mp_.tcache_count
                   && (tc_victim = last (bin)) != bin)
              {
                /* Proceed only if a chunk was actually obtained.  */
                if (tc_victim != 0)
                  {
                    /* 'bck' is tc_victim's predecessor in the bin.  */
                    bck = tc_victim->bk;
                    set_inuse_bit_at_offset (tc_victim, nb);
                    if (av != &main_arena)
                      set_non_main_arena (tc_victim);
                    /* Unlink tc_victim from the bin tail.
                       NOTE(review): unlike the unlink above, this path
                       performs no bck->fd == tc_victim integrity check —
                       matches upstream behavior here.  */
                    bin->bk = bck;
                    bck->fd = bin;
                    /* Hand the chunk to the per-thread cache.  */
                    tcache_put (tc_victim, tc_idx);
                  }
              }
          }
        /* Convert the chunk header address to the user pointer,
           optionally scribble over the payload, and return it.  */
        void *p = chunk2mem (victim);
        alloc_perturb (p, bytes);
        return p;
      }
  }