
[Binary]Tcache

2018/10/24

Tcache

Structures

tcache_entry

#if USE_TCACHE

/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
} tcache_entry;

tcache_entry is the structure overlaid on a freed chunk's user data; it contains only a single next pointer to the next chunk in the same bin.
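As a quick illustration, here is a minimal sketch of my own (assuming glibc 2.26–2.31; from 2.32 safe-linking mangles the next pointer, so the raw value would no longer equal a) showing the next link sitting directly in the freed chunk's user data:

/* Demo only: reading freed memory is undefined behavior, but it shows
   how tcache_entry is overlaid on the user-data area of a chunk.  */
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *a = malloc (0x20);
  void *b = malloc (0x20);
  free (a);                       /* bin head: a       */
  free (b);                       /* bin head: b -> a  */
  /* The first word of b's user data is now the next pointer.  */
  printf ("b = %p, *(void **) b = %p, a = %p\n", b, *(void **) b, a);
  return 0;
}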

tcache_perthread_struct

/* There is one of these for each thread, which contains the
   per-thread cache (hence "tcache_perthread_struct").  Keeping
   overall size low is mildly important.  Note that COUNTS and ENTRIES
   are redundant (we could have just counted the linked list each
   time), this is for performance reasons.  */
typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;
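tcache_perthread_struct is the per-thread management structure: counts[i] records how many chunks currently sit in bin i, and entries[i] is the head of that bin's singly linked list of tcache_entry. There are TCACHE_MAX_BINS (64 by default) bins, and the structure itself is allocated on the heap the first time a thread calls malloc, so for the main thread it typically sits right at the start of the heap. To illustrate the source comment about COUNTS and ENTRIES being redundant, here is a small standalone sketch of my own (not glibc code; the struct names simply mirror the definitions above): walking a bin's list reproduces the number that counts already stores.

/* Standalone illustration, not glibc code.  */
#include <stddef.h>
#include <stdio.h>

#define TCACHE_MAX_BINS 64                 /* default in glibc */

typedef struct tcache_entry { struct tcache_entry *next; } tcache_entry;

typedef struct tcache_perthread_struct
{
  char counts[TCACHE_MAX_BINS];
  tcache_entry *entries[TCACHE_MAX_BINS];
} tcache_perthread_struct;

static tcache_perthread_struct t;          /* zero-initialized */

/* Walking the list reproduces the cached count.  */
static size_t walk_count (size_t idx)
{
  size_t n = 0;
  for (tcache_entry *e = t.entries[idx]; e != NULL; e = e->next)
    ++n;
  return n;
}

int main (void)
{
  tcache_entry a = { NULL }, b = { &a };   /* bin 1: b -> a -> NULL */
  t.entries[1] = &b;
  t.counts[1] = 2;
  printf ("counts[1] = %d, walked = %zu\n", t.counts[1], walk_count (1));
  return 0;
}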

Functions

put

/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  assert (tc_idx < TCACHE_MAX_BINS);
  e->next = tcache->entries[tc_idx];
  tcache->entries[tc_idx] = e;
  ++(tcache->counts[tc_idx]);
}
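tcache_put converts the chunk pointer to its user-data address with chunk2mem, pushes the entry onto the head of the matching bin, and increments that bin's count; _int_free only takes this path while the bin holds fewer than mp_.tcache_count chunks (7 by default). In glibc 2.26–2.28 nothing here detects a chunk that is already in the bin, which is what enables the classic "tcache dup". The sketch below is my own demo for those versions (glibc 2.29 added the e->key check and aborts on this pattern): the same pointer is freed twice and then handed out twice by malloc.

/* tcache dup sketch; assumes glibc 2.26–2.28.  */
#include <assert.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (0x40);
  free (p);                     /* bin head: p              */
  free (p);                     /* no check: p -> p (loop)  */
  void *a = malloc (0x40);      /* pops p                   */
  void *b = malloc (0x40);      /* pops p again             */
  assert (a == p && b == p);
  return 0;
}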

get

/* Caller must ensure that we know tc_idx is valid and there's
   available chunks to remove.  */
static __always_inline void *
tcache_get (size_t tc_idx)
{
  tcache_entry *e = tcache->entries[tc_idx];
  assert (tc_idx < TCACHE_MAX_BINS);
  assert (tcache->entries[tc_idx] > 0);
  tcache->entries[tc_idx] = e->next;
  --(tcache->counts[tc_idx]);
  return (void *) e;
}
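tcache_get pops the head of the bin, makes its next pointer the new head, decrements the count, and returns the entry directly as the user pointer (no mem2chunk conversion is needed, since the entry already overlays the user data). The asserts only check the index and that the bin is non-empty; the next pointer itself is followed without any sanity check, which is what makes "tcache poisoning" work. The sketch below is my own demo, assuming glibc 2.26–2.28 (later releases add the e->key field, and 2.32 adds safe-linking and an alignment check that this naive version does not bypass): overwriting next after a free makes malloc return an arbitrary address.

/* tcache poisoning sketch; assumes glibc 2.26–2.28.  */
#include <assert.h>
#include <stdlib.h>

static long target;            /* stand-in for an address worth controlling */

int main (void)
{
  void *p = malloc (0x40);
  free (p);                    /* p becomes the bin head                  */
  *(void **) p = &target;      /* corrupt next (simulating a UAF write)   */
  void *a = malloc (0x40);     /* tcache_get returns p                    */
  void *b = malloc (0x40);     /* ...then follows next and returns target */
  assert (a == p && b == (void *) &target);
  return 0;
}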