author | Bryan Newbold <bnewbold@robocracy.org> | 2017-02-20 00:05:23 -0800 |
---|---|---|
committer | Bryan Newbold <bnewbold@robocracy.org> | 2017-02-20 00:05:23 -0800 |
commit | 5ca6e8e6a4e5c022a6fb5d28f30219c22c99eda8 (patch) | |
tree | 9b744b9dbf39e716e56daa620e2f3041968caf19 /gmalloc.c | |
download | scm-5ca6e8e6a4e5c022a6fb5d28f30219c22c99eda8.tar.gz, scm-5ca6e8e6a4e5c022a6fb5d28f30219c22c99eda8.zip | |
Import Upstream version 4e6 (tag: upstream/4e6)
Diffstat (limited to 'gmalloc.c')
-rw-r--r-- | gmalloc.c | 1638 |
1 file changed, 1638 insertions(+), 0 deletions(-)
diff --git a/gmalloc.c b/gmalloc.c
new file mode 100644
index 0000000..59874ee
--- /dev/null
+++ b/gmalloc.c
@@ -0,0 +1,1638 @@

/* DO NOT EDIT THIS FILE -- it is automagically generated.  -*- C -*- */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here. */

/* Declarations for `malloc' and friends.
   Copyright 1990, 91, 92, 93, 95, 96 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
#undef __P
#define __P(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef __P
#define __P(args) ()
#undef const
#define const
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#if defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
#include <limits.h>
#else
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#if defined (__STDC__) && __STDC__
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#define __malloc_size_t unsigned int
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc __P ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free __P ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign __P ((__malloc_size_t __alignment,
                              __malloc_size_t __size));
#endif
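The `__P` macro above lets one set of prototypes serve both ANSI and pre-ANSI compilers: under ANSI C it keeps the argument list, under K&R C it drops it, which is why every use wraps the arguments in double parentheses so they pass as a single macro argument. A minimal standalone sketch of the same pattern (the `DEMO_P` and `demo_add` names are hypothetical, not from this file):

```c
/* Sketch of the __P prototype-wrapping pattern used in the header above. */
#include <stdio.h>

#if defined (__STDC__) && __STDC__
#define DEMO_P(args) args        /* ANSI: keep the full prototype */
#else
#define DEMO_P(args) ()          /* K&R: empty parameter list */
#endif

/* Double parens: the whole list is one macro argument. */
extern int demo_add DEMO_P ((int a, int b));

int
demo_add (int a, int b)
{
  return a + b;
}

int
main (void)
{
  printf ("%d\n", demo_add (2, 3));   /* prints 5 */
  return 0;
}
```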
/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc __P ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT         (CHAR_BIT * sizeof(int))
#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE       (1 << BLOCKLOG)
#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)      ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that memaligned returned.  */
    __ptr_t exact;              /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal __P ((__malloc_size_t __size));
extern __ptr_t _realloc_internal __P ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal __P ((__ptr_t __ptr));

#endif /* _MALLOC_INTERNAL.  */
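The macros above fix the whole geometry of the allocator: BLOCKIFY rounds a byte count up to whole blocks, and BLOCK/ADDRESS convert between addresses and 1-based indices into `_heapinfo`. A self-contained sketch of that arithmetic, with a local `heapbase` standing in for the real `_heapbase` and BLOCKLOG fixed at 12 (the value on hosts whose ints are wider than 16 bits):

```c
/* Standalone sketch of the block arithmetic; `heapbase' and the
   constants are local stand-ins, not the allocator's real globals. */
#include <assert.h>
#include <stdio.h>

#define BLOCKLOG 12
#define BLOCKSIZE (1 << BLOCKLOG)                        /* 4096 */
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

static char *heapbase;           /* would be _heapbase in gmalloc.c */

#define BLOCK(A)   (((char *) (A) - heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + heapbase))

int
main (void)
{
  static char heap[16 * BLOCKSIZE];
  void *a;

  heapbase = heap;

  assert (BLOCKIFY (1) == 1);            /* one byte still costs a block */
  assert (BLOCKIFY (4096) == 1);
  assert (BLOCKIFY (4097) == 2);

  /* Round trip: block 3 starts 2 * BLOCKSIZE past the base. */
  a = ADDRESS (3);
  assert (BLOCK (a) == 3);
  printf ("block 3 at offset %ld\n", (long) ((char *) a - heapbase));
  return 0;
}
```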
/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) __P ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize __P ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) __P ((void));
extern void (*__free_hook) __P ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
                                        __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe __P ((__ptr_t __ptr));

/* Activate a standard collection of tracing hooks.  */
extern void mtrace __P ((void));
extern void muntrace __P ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats __P ((void));

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings __P ((__ptr_t __start,
                                  void (*__warnfun) __P ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free __P ((__ptr_t *__handleptr));
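The hook variables declared above let a program interpose on every allocation without relinking. A hedged sketch of a tracing `__malloc_hook`, assuming these declarations are in scope through this malloc.h; the hook unhooks itself around the inner call so it does not recurse:

```c
/* Sketch: a tracing malloc hook in the style this header supports.
   Assumes __malloc_hook, __ptr_t, __malloc_size_t and __P come from
   the header above; not part of gmalloc.c itself. */
#include <stdio.h>

static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t));

static __ptr_t
tracing_malloc_hook (__malloc_size_t size)
{
  __ptr_t result;

  __malloc_hook = old_malloc_hook;      /* unhook: inner malloc goes
                                           straight to _malloc_internal */
  result = malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, result);
  __malloc_hook = tracing_malloc_hook;  /* re-arm for the next call */
  return result;
}

static void
install_trace (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = tracing_malloc_hook;
}
```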
/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) __P ((void));
void (*__after_morecore_hook) __P ((void));


/* Aligned allocation.  */
static __ptr_t align __P ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
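`align` above rounds whatever address `__morecore` returns up to the next BLOCKSIZE boundary by requesting the shortfall as extra core. The adjustment arithmetic in isolation, as a standalone sketch with a fixed 4096-byte block:

```c
/* Sketch of the BLOCKSIZE-alignment adjustment used by align() above;
   the addresses are illustrative values, not real break pointers. */
#include <assert.h>

#define BLOCKSIZE 4096UL

/* How many extra bytes must be requested so the usable region
   starts on a BLOCKSIZE boundary. */
static unsigned long
align_adjustment (unsigned long addr)
{
  unsigned long adj = addr % BLOCKSIZE;
  return adj == 0 ? 0 : BLOCKSIZE - adj;
}

int
main (void)
{
  assert (align_adjustment (0x10000) == 0);      /* already aligned */
  assert (align_adjustment (0x10001) == 4095);   /* one byte past */
  assert ((0x10001 + align_adjustment (0x10001)) % BLOCKSIZE == 0);
  return 0;
}
```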
/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space __P ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo __P ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  return 1;
}

static int morecore_recursing;
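`get_contiguous_space` probes the current break with a zero-sized `__morecore` call before committing, and gives the memory back if the region did not land where expected. The same probe pattern written directly against the legacy `sbrk` interface (a sketch only; `sbrk` is platform-dependent and obsolete on modern systems):

```c
/* Sketch: probing whether new core would be contiguous, using sbrk
   directly.  Mirrors get_contiguous_space() above; not the real code. */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  void *expected = sbrk (0);    /* where the break is now */
  void *got = sbrk (4096);      /* try to extend by one block */

  if (got == (void *) -1)
    {
      perror ("sbrk");
      return 1;
    }
  /* Contiguous iff the new region starts exactly at the old break. */
  printf ("contiguous: %s\n", got == expected ? "yes" : "no");
  if (got != expected)
    sbrk (-4096);               /* give it back, as the code above does */
  return 0;
}
```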
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
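When new core reaches past the blocks the info table can describe, `morecore` doubles the table size until the top block fits. The sizing loop in isolation, with illustrative numbers:

```c
/* Sketch of morecore()'s info-table sizing: double until the table
   has an entry for the top block of the newly extended heap.
   The starting values are hypothetical. */
#include <stdio.h>

int
main (void)
{
  unsigned long heapsize = 1024;    /* current table entries */
  unsigned long top_block = 5000;   /* BLOCK (result + size), say */
  unsigned long newsize = heapsize;

  do
    newsize *= 2;
  while (top_block > newsize);

  /* 1024 -> 8192: the first power-of-two multiple covering block 5000 */
  printf ("grow table: %lu -> %lu entries\n", heapsize, newsize);
  return 0;
}
```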
/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
          result = malloc (BLOCKSIZE);
          if (result == NULL)
            return NULL;

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore (wantblocks * BLOCKSIZE);
              if (result == NULL)
                return NULL;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}
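For small requests, `_malloc_internal` rounds the size up to a power-of-two fragment and keeps only the base-two logarithm; the loop above computes it by repeatedly halving `size - 1`. A standalone worked version:

```c
/* Sketch of the fragment-size computation from _malloc_internal:
   round a small request up to a power of two and find its log2. */
#include <stdio.h>

int
main (void)
{
  unsigned sizes[] = { 1, 2, 9, 16, 17, 100, 2048 };
  unsigned j;

  for (j = 0; j < sizeof sizes / sizeof sizes[0]; ++j)
    {
      unsigned size = sizes[j];
      unsigned log = 1;

      --size;
      while ((size /= 2) != 0)
        ++log;
      /* e.g. 9 -> log 4 (16-byte fragment); 17 -> log 5 (32 bytes) */
      printf ("request %4u -> log %u, fragment %u bytes\n",
              sizes[j], log, 1u << log);
    }
  return 0;
}
```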
__ptr_t
malloc (size)
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy __P ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) __P ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal (info_blocks
                                                          * BLOCKSIZE);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

          free (ADDRESS (block));
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }
}
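Every path through `_free_internal` keeps the instrumentation counters consistent. A sketch of watching them around a large malloc/free pair; it assumes the program is linked against this gmalloc.c (the counters are its globals, declared here as `size_t` since `__malloc_size_t` is `size_t` on ANSI hosts):

```c
/* Sketch: observing gmalloc's instrumentation counters.  These externs
   are gmalloc.c's globals, not part of the standard C library. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

extern size_t _chunks_used, _bytes_used, _chunks_free, _bytes_free;

static void
report (const char *when)
{
  printf ("%s: used %lu chunks / %lu bytes, free %lu chunks / %lu bytes\n",
          when, (unsigned long) _chunks_used, (unsigned long) _bytes_used,
          (unsigned long) _chunks_free, (unsigned long) _bytes_free);
}

int
main (void)
{
  char *p;

  report ("start");
  p = malloc (3 * 4096);   /* large request: whole blocks */
  report ("after malloc");
  free (p);                /* blocks go back on the free list */
  report ("after free");
  return 0;
}
```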
/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif



/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy __P ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
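`__malloc_safe_bcopy` exists because plain `bcopy` may not handle overlapping regions; when the destination sits above the source, it copies from the end. Standard `memmove` makes the same guarantee, and a short standalone example shows the overlapping shift that a naive forward copy would corrupt:

```c
/* Sketch: why realloc's block copy must be overlap-safe.  memmove
   handles the overlapping shift that a naive forward copy corrupts. */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char buf[16] = "abcdefghij";

  /* Shift the string right by 2 within the same buffer: regions
     [0,10) and [2,12) overlap and the destination is above the
     source, exactly the case __malloc_safe_bcopy copies backwards.  */
  memmove (buf + 2, buf, 10);
  buf[12] = '\0';
  printf ("%s\n", buf);     /* prints "ababcdefghij" */
  return 0;
}
```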
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal (ptr);
          result = _malloc_internal (size);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal (blocks * BLOCKSIZE);
                  _free_internal (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal (ptr);
        }
      break;
    }

  return result;
}
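Note the fragment rule above: a resized block stays in place only when the new size rounds to the same power-of-two fragment; otherwise the data moves. At the public API level that motivates the usual defensive `realloc` idiom, sketched here (standalone, and valid for any conforming allocator, not just this one):

```c
/* Sketch: safe realloc usage -- keep the old pointer until the call
   succeeds, since a failed realloc must leave the block intact. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *buf = malloc (16);
  char *bigger;

  if (buf == NULL)
    return 1;
  strcpy (buf, "hello");

  bigger = realloc (buf, 4096);   /* may move: 16 bytes is a fragment,
                                     4096 needs a whole block */
  if (bigger == NULL)
    {
      free (buf);                 /* original block still valid */
      return 1;
    }
  buf = bigger;
  printf ("%s\n", buf);           /* contents preserved: "hello" */
  free (buf);
  return 0;
}
```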
__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk __P ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
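The calloc above multiplies `nmemb * size` without an overflow check, as was common at the time; modern allocators reject products that wrap around. A hedged sketch of the check as a wrapper (my addition for illustration, not part of this file):

```c
/* Sketch: an overflow-checked calloc wrapper.  The check is standard
   practice today; it is NOT part of the gmalloc.c shown above. */
#include <stddef.h>
#include <stdlib.h>

void *
checked_calloc (size_t nmemb, size_t size)
{
  /* Reject nmemb * size overflowing size_t before multiplying. */
  if (size != 0 && nmemb > (size_t) -1 / size)
    return NULL;
  return calloc (nmemb, size);  /* calloc already zeroes the memory */
}
```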
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)       /* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}

#endif /* Not DJGPP v1 */
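When `memalign` has to shift the pointer, it records the aligned/exact pair in `_aligned_blocks` so that `free` can recover the block `malloc` actually returned. Usage is straightforward; a standalone sketch that requests 64-byte alignment and verifies it (assuming the declarations from this file's header are visible):

```c
/* Sketch: requesting 64-byte-aligned storage from the memalign above
   and verifying the alignment of the result. */
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = memalign (64, 1000);

  if (p == NULL)
    return 1;
  printf ("aligned: %s\n",
          ((unsigned long) p % 64) == 0 ? "yes" : "no");
  free (p);   /* free() consults _aligned_blocks to find the real block */
  return 0;
}
```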
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
extern size_t __getpagesize __P ((void));
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */
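`valloc` is just `memalign` with the page size looked up once and cached. A sketch of page-aligned allocation from the caller's side, using POSIX `sysconf` to check the result (illustrative, not this file's code):

```c
/* Sketch: page-aligned allocation in the spirit of valloc(), checked
   against the system page size reported by POSIX sysconf(). */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  long pagesize = sysconf (_SC_PAGESIZE);
  void *p = valloc (8192);      /* two pages on a 4 KB system */

  if (p == NULL)
    return 1;
  printf ("page size %ld, pointer mod pagesize = %ld\n",
          pagesize, (long) ((unsigned long) p % (unsigned long) pagesize));
  free (p);
  return 0;
}
```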