Mailing List Archive

CVS: python/dist/src/Objects obmalloc.c,2.14,2.15
Update of /cvsroot/python/python/dist/src/Objects
In directory usw-pr-cvs1:/tmp/cvs-serv8692/python/Objects

Modified Files:
obmalloc.c
Log Message:
Now that we're no longer linking arenas together, there's no need to
waste the first pool if malloc happens to return a pool-aligned address.

This means the number of pools per arena can now vary by 1. Unfortunately,
the code counted up from 0 to a presumed constant number of pools, so I
changed the increasing "watermark" counter into a decreasing "nfreepools"
counter and adjusted the surrounding code accordingly. This also made it
possible to get rid of two more macros.

I also changed the code to align the first address to a pool boundary
instead of a page boundary. These are two parallel sets of macro #defines
that happen to be identical now, but the page macros are in theory more
restrictive (bigger), and I can see no reason the code wasn't aligning to
the less restrictive pool size all along (the code relies only on pool
alignment).

Hmm. The "page size" macros aren't used for anything *except* defining
the pool size macros, and the comments claim even that dependency isn't
necessary. So this has the feel of a layer of indirection that serves no
purpose; the page macros should probably be removed now.


Index: obmalloc.c
===================================================================
RCS file: /cvsroot/python/python/dist/src/Objects/obmalloc.c,v
retrieving revision 2.14
retrieving revision 2.15
diff -C2 -d -r2.14 -r2.15
*** obmalloc.c 30 Mar 2002 06:20:23 -0000 2.14
--- obmalloc.c 30 Mar 2002 07:04:41 -0000 2.15
***************
*** 164,173 ****
* memory from the system across various platforms.
*/
!
! /* ALLOCATED_ARENA_SIZE is passed to malloc; after alignment, we can't
! * count on more than ARENA_SIZE bytes being usable for pools.
! */
! #define ALLOCATED_ARENA_SIZE (256 << 10) /* 256KB */
! #define ARENA_SIZE (ALLOCATED_ARENA_SIZE - SYSTEM_PAGE_SIZE)

#ifdef WITH_MEMORY_LIMITS
--- 164,168 ----
* memory from the system across various platforms.
*/
! #define ARENA_SIZE (256 << 10) /* 256KB */

#ifdef WITH_MEMORY_LIMITS
***************
*** 181,185 ****
#define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */
#define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK
- #define ARENA_NB_POOLS (ARENA_SIZE / POOL_SIZE)

/*
--- 176,179 ----
***************
*** 325,335 ****
static ulong maxarenas = 0;

! /* Number of pools already allocated from the current arena. This is
! * initialized to the max # of pools to provoke the first allocation request
! * into allocating a new arena.
! */
! static uint watermark = ARENA_NB_POOLS;

! /* Free space start address in current arena. */
static block *arenabase = NULL;

--- 319,326 ----
static ulong maxarenas = 0;

! /* Number of pools still available to be allocated in the current arena. */
! static uint nfreepools = 0;

! /* Free space start address in current arena. This is pool-aligned. */
static block *arenabase = NULL;

***************
*** 361,372 ****
new_arena(void)
{
! block *bp = (block *)PyMem_MALLOC(ALLOCATED_ARENA_SIZE);
if (bp == NULL)
return NULL;

! watermark = 0;
! /* Page-round up */
! arenabase = bp + (SYSTEM_PAGE_SIZE -
! ((off_t )bp & SYSTEM_PAGE_SIZE_MASK));

/* Make room for a new entry in the arenas vector. */
--- 352,369 ----
new_arena(void)
{
! uint excess; /* number of bytes above pool alignment */
! block *bp = (block *)PyMem_MALLOC(ARENA_SIZE);
if (bp == NULL)
return NULL;

! /* arenabase <- first pool-aligned address in the arena
! nfreepools <- number of whole pools that fit after alignment */
! arenabase = bp;
! nfreepools = ARENA_SIZE / POOL_SIZE;
! excess = (uint)bp & POOL_SIZE_MASK;
! if (excess != 0) {
! --nfreepools;
! arenabase += POOL_SIZE - excess;
! }

/* Make room for a new entry in the arenas vector. */
***************
*** 438,448 ****
* Tricky: Letting B be the arena base address in arenas[I], P belongs to the
* arena if and only if
! * B <= P < B + ALLOCATED_ARENA_SIZE
* Subtracting B throughout, this is true iff
! * 0 <= P-B < ALLOCATED_ARENA_SIZE
* By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
*/
#define ADDRESS_IN_RANGE(P, I) \
! ((I) < narenas && (uptr)(P) - arenas[I] < (uptr)ALLOCATED_ARENA_SIZE)
/*==========================================================================*/

--- 435,445 ----
* Tricky: Letting B be the arena base address in arenas[I], P belongs to the
* arena if and only if
! * B <= P < B + ARENA_SIZE
* Subtracting B throughout, this is true iff
! * 0 <= P-B < ARENA_SIZE
* By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
*/
#define ADDRESS_IN_RANGE(P, I) \
! ((I) < narenas && (uptr)(P) - arenas[I] < (uptr)ARENA_SIZE)
/*==========================================================================*/

***************
*** 559,567 ****
* Allocate new pool
*/
! if (watermark < ARENA_NB_POOLS) {
! /* commit malloc(POOL_SIZE) from the current arena */
commit_pool:
! watermark++;
! pool = (poolp )arenabase;
arenabase += POOL_SIZE;
pool->arenaindex = narenas - 1;
--- 556,563 ----
* Allocate new pool
*/
! if (nfreepools) {
commit_pool:
! --nfreepools;
! pool = (poolp)arenabase;
arenabase += POOL_SIZE;
pool->arenaindex = narenas - 1;