| author | Akinori Ito <aito@eie.yz.yamagata-u.ac.jp> | 2001-11-08 05:14:08 +0000 |
|---|---|---|
| committer | Akinori Ito <aito@eie.yz.yamagata-u.ac.jp> | 2001-11-08 05:14:08 +0000 |
| commit | 68a07bf03b7624c9924065cce9ffa45497225834 (patch) | |
| tree | c2adb06a909a8594445e4a3f8587c4bad46e3ecd /gc/include | |
| download | w3m-68a07bf03b7624c9924065cce9ffa45497225834.tar.gz, w3m-68a07bf03b7624c9924065cce9ffa45497225834.zip | |
Initial revision
Diffstat (limited to 'gc/include')
| -rw-r--r-- | gc/include/backptr.h | 56 |
| -rw-r--r-- | gc/include/cord.h | 327 |
| -rw-r--r-- | gc/include/ec.h | 70 |
| -rw-r--r-- | gc/include/gc.h | 754 |
| -rw-r--r-- | gc/include/gc_alloc.h | 380 |
| -rw-r--r-- | gc/include/gc_cpp.h | 290 |
| -rw-r--r-- | gc/include/gc_inl.h | 103 |
| -rw-r--r-- | gc/include/gc_inline.h | 1 |
| -rw-r--r-- | gc/include/gc_typed.h | 91 |
| -rw-r--r-- | gc/include/javaxfc.h | 41 |
| -rw-r--r-- | gc/include/leak_detector.h | 7 |
| -rw-r--r-- | gc/include/new_gc_alloc.h | 456 |
| -rw-r--r-- | gc/include/private/cord_pos.h | 118 |
| -rw-r--r-- | gc/include/private/gc_hdrs.h | 135 |
| -rw-r--r-- | gc/include/private/gc_priv.h | 1748 |
| -rw-r--r-- | gc/include/private/gcconfig.h | 1099 |
| -rw-r--r-- | gc/include/weakpointer.h | 221 |
17 files changed, 5897 insertions, 0 deletions
diff --git a/gc/include/backptr.h b/gc/include/backptr.h
new file mode 100644
index 0000000..d34224e
--- /dev/null
+++ b/gc/include/backptr.h
@@ -0,0 +1,56 @@
+/*
+ * This is a simple API to implement pointer back tracing, i.e.
+ * to answer questions such as "who is pointing to this" or
+ * "why is this object being retained by the collector"
+ *
+ * This API assumes that we have an ANSI C compiler.
+ *
+ * Most of these calls yield useful information only after
+ * a garbage collection.  Usually the client will first force
+ * a full collection and then gather information, preferably
+ * before much intervening allocation.
+ *
+ * The implementation of the interface is only about 99.9999%
+ * correct.  It is intended to be good enough for profiling,
+ * but is not intended to be used with production code.
+ *
+ * Results are likely to be much more useful if all allocation is
+ * accomplished through the debugging allocators.
+ *
+ * The implementation idea is due to A. Demers.
+ */
+
+/* Store information about the object referencing dest in *base_p    */
+/* and *offset_p.                                                     */
+/* If multiple objects or roots point to dest, the one reported      */
+/* will be the last one used by the garbage collector to trace the   */
+/* object.                                                            */
+/*   source is root ==> *base_p = address, *offset_p = 0              */
+/*   source is heap object ==> *base_p != 0, *offset_p = offset       */
+/*   Returns 1 on success, 0 if source couldn't be determined.        */
+/* Dest can be any address within a heap object.                      */
+typedef enum {  GC_UNREFERENCED,   /* No reference info available.      */
+		GC_NO_SPACE,       /* Dest not allocated with debug alloc */
+		GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
+		GC_REFD_FROM_HEAP, /* Referenced from another heap obj.   */
+		GC_FINALIZER_REFD  /* Finalizable and hence accessible.   */
+} GC_ref_kind;
+
+GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
+
+/* Generate a random heap address.            */
+/* The resulting address is in the heap, but  */
+/* not necessarily inside a valid object.     */
+void * GC_generate_random_heap_address(void);
+
+/* Generate a random address inside a valid marked heap object. */
+void * GC_generate_random_valid_address(void);
+
+/* Force a garbage collection and generate a backtrace from a */
+/* random heap address.                                       */
+/* This uses the GC logging mechanism (GC_printf) to produce  */
+/* output.  It can often be called from a debugger.  The      */
+/* source in dbg_mlc.c also serves as a sample client.        */
+void GC_generate_random_backtrace(void);
+
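As an aside, a minimal sketch of how a client of this interface might look (not part of the commit; `report_retainer` is an invented name, and it assumes a collector built with the backtrace support enabled):

```c
#include <stdio.h>
#include "gc.h"
#include "backptr.h"

/* Hypothetical client: after a full collection, report who kept obj alive. */
void report_retainer(void *obj)
{
    void *base;
    size_t offset;

    GC_gcollect();  /* back-pointer info is only meaningful after a GC */
    switch (GC_get_back_ptr_info(obj, &base, &offset)) {
    case GC_REFD_FROM_ROOT:
        printf("referenced directly from root at %p\n", base);
        break;
    case GC_REFD_FROM_HEAP:
        printf("referenced from heap object %p at offset %lu\n",
               base, (unsigned long)offset);
        break;
    case GC_UNREFERENCED:
        printf("no reference information recorded\n");
        break;
    default:
        break;  /* GC_NO_SPACE, GC_FINALIZER_REFD */
    }
}
```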
diff --git a/gc/include/cord.h b/gc/include/cord.h
new file mode 100644
index 0000000..584112f
--- /dev/null
+++ b/gc/include/cord.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/* Boehm, October 5, 1995 4:20 pm PDT */
+
+/*
+ * Cords are immutable character strings.  A number of operations
+ * on long cords are much more efficient than their strings.h counterparts.
+ * In particular, concatenation takes constant time independent of the length
+ * of the arguments.  (Cords are represented as trees, with internal
+ * nodes representing concatenation and leaves consisting of either C
+ * strings or a functional description of the string.)
+ *
+ * The following are reasonable applications of cords.  They would perform
+ * unacceptably if C strings were used:
+ * - A compiler that produces assembly language output by repeatedly
+ *   concatenating instructions onto a cord representing the output file.
+ * - A text editor that converts the input file to a cord, and then
+ *   performs editing operations by producing a new cord representing
+ *   the file after each character change (and keeping the old ones in an
+ *   edit history)
+ *
+ * For optimal performance, cords should be built by
+ * concatenating short sections.
+ * This interface is designed for maximum compatibility with C strings.
+ * ASCII NUL characters may be embedded in cords using CORD_from_fn.
+ * This is handled correctly, but CORD_to_char_star will produce a string
+ * with embedded NULs when given such a cord.
+ *
+ * This interface is fairly big, largely for performance reasons.
+ * The most basic constants and functions:
+ *
+ * CORD - the type of a cord;
+ * CORD_EMPTY - empty cord;
+ * CORD_len(cord) - length of a cord;
+ * CORD_cat(cord1,cord2) - concatenation of two cords;
+ * CORD_substr(cord, start, len) - substring (or subcord);
+ * CORD_pos i;  CORD_FOR(i, cord) {  ... CORD_pos_fetch(i) ... } -
+ *    examine each character in a cord.  CORD_pos_fetch(i) is the char.
+ * CORD_fetch(int i) - Retrieve i'th character (slowly).
+ * CORD_cmp(cord1, cord2) - compare two cords.
+ * CORD_from_file(FILE * f) - turn a read-only file into a cord.
+ * CORD_to_char_star(cord) - convert to C string.
+ *   (Non-NULL C constant strings are cords.)
+ * CORD_printf (etc.) - cord version of printf.  Use %r for cords.
+ */
+# ifndef CORD_H
+
+# define CORD_H
+# include <stddef.h>
+# include <stdio.h>
+/* Cords have type const char *.  This is cheating quite a bit, and not */
+/* 100% portable.  But it means that nonempty character string          */
+/* constants may be used as cords directly, provided the string is      */
+/* never modified in place.  The empty cord is represented by, and      */
+/* can be written as, 0.                                                 */
+
+typedef const char * CORD;
+
+/* An empty cord is always represented as nil */
+# define CORD_EMPTY 0
+
+/* Is a nonempty cord represented as a C string? */
+#define CORD_IS_STRING(s) (*(s) != '\0')
+
+/* Concatenate two cords.  If the arguments are C strings, they may */
+/* not be subsequently altered.                                      */
+CORD CORD_cat(CORD x, CORD y);
+
+/* Concatenate a cord and a C string with known length.  Except for the */
+/* empty string case, this is a special case of CORD_cat.  Since the    */
+/* length is known, it can be faster.                                    */
+/* The string y is shared with the resulting CORD.  Hence it should     */
+/* not be altered by the caller.                                         */
+CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
+
+/* Compute the length of a cord */
+size_t CORD_len(CORD x);
+
+/* Cords may be represented by functions defining the ith character */
+typedef char (* CORD_fn)(size_t i, void * client_data);
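A quick usage sketch for the basic operations declared so far (not part of the commit; it assumes the cord support code from this tree, e.g. cordbscs.c and cordxtra.c, is linked in, and uses CORD_to_char_star, which is declared further down this header):

```c
#include <stdio.h>
#include "cord.h"

int main(void)
{
    /* Nonempty C string constants are already cords, so they can be
       passed to CORD_cat directly.  Concatenation takes constant time
       independent of the argument lengths. */
    CORD s = CORD_cat("hello, ", "cord world");

    printf("%s (%lu chars)\n", CORD_to_char_star(s),
           (unsigned long)CORD_len(s));   /* prints 17 */
    return 0;
}
```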
+/* Turn a functional description into a cord. */
+CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
+
+/* Return the substring (subcord really) of x with length at most n, */
+/* starting at position i.  (The initial character has position 0.)  */
+CORD CORD_substr(CORD x, size_t i, size_t n);
+
+/* Return the argument, but rebalanced to allow more efficient        */
+/* character retrieval, substring operations, and comparisons.        */
+/* This is useful only for cords that were built using repeated       */
+/* concatenation.  Guarantees log time access to the result, unless   */
+/* x was obtained through a large number of repeated substring ops    */
+/* or the embedded functional descriptions take longer to evaluate.   */
+/* May reallocate significant parts of the cord.  The argument is not */
+/* modified; only the result is balanced.                              */
+CORD CORD_balance(CORD x);
+
+/* The following traverse a cord by applying a function to each      */
+/* character.  This is occasionally appropriate, especially where    */
+/* speed is crucial.  But, since C doesn't have nested functions,    */
+/* clients of this sort of traversal are clumsy to write.  Consider  */
+/* the functions that operate on cord positions instead.             */
+
+/* Function to iteratively apply to individual characters in cord. */
+typedef int (* CORD_iter_fn)(char c, void * client_data);
+
+/* Function to apply to substrings of a cord.  Each substring is a */
+/* C character string, not a general cord.                          */
+typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
+# define CORD_NO_FN ((CORD_batched_iter_fn)0)
+
+/* Apply f1 to each character in the cord, in ascending order,         */
+/* starting at position i.  If                                          */
+/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by   */
+/* a single call to f2.  The parameter f2 is provided only to allow     */
+/* some optimization by the client.  This terminates when the right     */
+/* end of this string is reached, or when f1 or f2 return != 0.  In the */
+/* latter case CORD_iter returns != 0.  Otherwise it returns 0.         */
+/* The specified value of i must be < CORD_len(x).                      */
+int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
+	       CORD_batched_iter_fn f2, void * client_data);
+
+/* A simpler version that starts at 0, and without f2: */
+int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
+# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
+
+/* Similar to CORD_iter5, but end-to-beginning.  No provisions for */
+/* CORD_batched_iter_fn.                                            */
+int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
+
+/* A simpler version that starts at the end: */
+int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
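For example, a character-counting traversal with the callback interface just declared might read as follows (illustrative only; `count_e` and `count_es` are invented names):

```c
#include "cord.h"

/* CORD_iter_fn callback: bump a counter for every 'e'.  Returning 0
   tells CORD_iter to continue to the end of the cord. */
static int count_e(char c, void *client_data)
{
    if (c == 'e') ++*(size_t *)client_data;
    return 0;
}

size_t count_es(CORD x)
{
    size_t n = 0;
    (void)CORD_iter(x, count_e, &n);
    return n;
}
```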
+/* Functions that operate on cord positions.  The easy way to traverse  */
+/* cords.  A cord position is logically a pair consisting of a cord     */
+/* and an index into that cord.  But it is much faster to retrieve a    */
+/* character based on a position than on an index.  Unfortunately,      */
+/* positions are big (order of a few 100 bytes), so allocate them with  */
+/* caution.                                                              */
+/* Things in cord_pos.h should be treated as opaque, except as           */
+/* described below.  Also note that                                      */
+/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function  */
+/* definitions.  The former may evaluate their argument more than once.  */
+# include "private/cord_pos.h"
+
+/*
+	Visible definitions from above:
+
+	typedef <OPAQUE but fairly big> CORD_pos[1];
+
+	* Extract the cord from a position:
+	CORD CORD_pos_to_cord(CORD_pos p);
+
+	* Extract the current index from a position:
+	size_t CORD_pos_to_index(CORD_pos p);
+
+	* Fetch the character located at the given position:
+	char CORD_pos_fetch(CORD_pos p);
+
+	* Initialize the position to refer to the given cord and index.
+	* Note that this is the most expensive function on positions:
+	void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+	* Advance the position to the next character.
+	* P must be initialized and valid.
+	* Invalidates p if past end:
+	void CORD_next(CORD_pos p);
+
+	* Move the position to the preceding character.
+	* P must be initialized and valid.
+	* Invalidates p if past beginning:
+	void CORD_prev(CORD_pos p);
+
+	* Is the position valid, i.e. inside the cord?
+	int CORD_pos_valid(CORD_pos p);
+*/
+# define CORD_FOR(pos, cord) \
+    for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
+
+
+/* An out of memory handler to call.  May be supplied by client. */
+/* Must not return.                                               */
+extern void (* CORD_oom_fn)(void);
+
+/* Dump the representation of x to stdout in an implementation defined */
+/* manner.  Intended for debugging only.                                */
+void CORD_dump(CORD x);
+
+/* The following could easily be implemented by the client.  They are */
+/* provided in cordxtra.c for convenience.                             */
+
+/* Concatenate a character to the end of a cord. */
+CORD CORD_cat_char(CORD x, char c);
+
+/* Concatenate n cords. */
+CORD CORD_catn(int n, /* CORD */ ...);
+
+/* Return the character in CORD_substr(x, i, 1) */
+char CORD_fetch(CORD x, size_t i);
+
+/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
+int CORD_cmp(CORD x, CORD y);
+
+/* A generalization that takes both starting positions for the         */
+/* comparison, and a limit on the number of characters to be compared. */
+int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
+
+/* Find the first occurrence of s in x at position start or later. */
+/* Return the position of the first character of s in x, or        */
+/* CORD_NOT_FOUND if there is none.                                 */
+size_t CORD_str(CORD x, size_t start, CORD s);
+
+/* Return a cord consisting of i copies of (possibly NUL) c.  Dangerous */
+/* in conjunction with CORD_to_char_star.                                */
+/* The resulting representation takes constant space, independent of i. */
+CORD CORD_chars(char c, size_t i);
+# define CORD_nul(i) CORD_chars('\0', (i))
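The position-based loop sketched in the comment block above, spelled out as a complete function (illustrative, not part of the commit):

```c
#include <stdio.h>
#include "cord.h"

/* Print a cord one character at a time using the fast position API.
   CORD_FOR initializes the position at index 0 and advances it with
   CORD_next until it falls off the end of the cord. */
void print_cord(CORD x)
{
    CORD_pos p;
    CORD_FOR(p, x) {
        putchar(CORD_pos_fetch(p));
    }
}
```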
+/* Turn a file into a cord.  The file must be seekable.  Its contents  */
+/* must remain constant.  The file may be accessed as an immediate     */
+/* result of this call and/or as a result of subsequent accesses to    */
+/* the cord.  Short files are likely to be immediately read, but       */
+/* long files are likely to be read on demand, possibly relying on     */
+/* stdio for buffering.                                                 */
+/* We must have exclusive access to the descriptor f, i.e. we may      */
+/* read it at any time, and expect the file pointer to be              */
+/* where we left it.  Normally this should be invoked as               */
+/* CORD_from_file(fopen(...))                                           */
+/* CORD_from_file arranges to close the file descriptor when it is no  */
+/* longer needed (e.g. when the result becomes inaccessible).          */
+/* The file f must be such that ftell reflects the actual character    */
+/* position in the file, i.e. the number of characters that can be     */
+/* or were read with fread.  On UNIX systems this is always true.  On  */
+/* MS Windows systems, f must be opened in binary mode.                */
+CORD CORD_from_file(FILE * f);
+
+/* Equivalent to the above, except that the entire file will be read */
+/* and the file will be closed immediately.                           */
+/* The binary mode restriction from above does not apply.            */
+CORD CORD_from_file_eager(FILE * f);
+
+/* Equivalent to the above, except that the file will be read on demand.*/
+/* The binary mode restriction applies.                                  */
+CORD CORD_from_file_lazy(FILE * f);
+
+/* Turn a cord into a C string.  The result shares no structure with */
+/* x, and is thus modifiable.                                          */
+char * CORD_to_char_star(CORD x);
+
+/* Turn a C string into a CORD.  The C string is copied, and so may */
+/* subsequently be modified.                                          */
+CORD CORD_from_char_star(const char *s);
+
+/* Identical to the above, but the result may share structure with */
+/* the argument and is thus not modifiable.                          */
+const char * CORD_to_const_char_star(CORD x);
+
+/* Write a cord to a file, starting at the current position.  No */
+/* trailing NULs or newlines are added.                            */
+/* Returns EOF if a write error occurs, 1 otherwise.               */
+int CORD_put(CORD x, FILE * f);
+
+/* "Not found" result for the following two functions. */
+# define CORD_NOT_FOUND ((size_t)(-1))
+
+/* A vague analog of strchr.  Returns the position (an integer, not    */
+/* a pointer) of the first occurrence of (char) c inside x at position */
+/* i or later.  The value i must be < CORD_len(x).                      */
+size_t CORD_chr(CORD x, size_t i, int c);
+
+/* A vague analog of strrchr.  Returns index of the last occurrence */
+/* of (char) c inside x at position i or earlier.  The value i      */
+/* must be < CORD_len(x).                                             */
+size_t CORD_rchr(CORD x, size_t i, int c);
+
+
+/* The following are also not primitive, but are implemented in          */
+/* cordprnt.c.  They provide functionality similar to the ANSI C         */
+/* functions with corresponding names, but with the following            */
+/* additions and changes:                                                 */
+/* 1. A %r conversion specification specifies a CORD argument.  Field    */
+/*    width, precision, etc. have the same semantics as for %s.          */
+/*    (Note that %c, %C, and %S were already taken.)                      */
+/* 2. The format string is represented as a CORD.                         */
+/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st   */
+/*    argument.  Unlike their ANSI C versions, there is no need to guess */
+/*    the correct buffer size.                                             */
+/* 4. Most of the conversions are implemented through the native          */
+/*    vsprintf.  Hence they are usually no faster, and                     */
+/*    idiosyncrasies of the native printf are preserved.  However,        */
+/*    CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied;    */
+/*    the result shares the original structure.  This may make them       */
+/*    very efficient in some unusual applications.                         */
+/*    The format string is copied.                                         */
+/* All functions return the number of characters generated or -1 on      */
+/* error.  This complies with the ANSI standard, but is inconsistent     */
+/* with some older implementations of sprintf.                             */
+
+/* The implementation of these is probably less portable than the rest */
+/* of this package.
*/ + +#ifndef CORD_NO_IO + +#include <stdarg.h> + +int CORD_sprintf(CORD * out, CORD format, ...); +int CORD_vsprintf(CORD * out, CORD format, va_list args); +int CORD_fprintf(FILE * f, CORD format, ...); +int CORD_vfprintf(FILE * f, CORD format, va_list args); +int CORD_printf(CORD format, ...); +int CORD_vprintf(CORD format, va_list args); + +#endif /* CORD_NO_IO */ + +# endif /* CORD_H */ diff --git a/gc/include/ec.h b/gc/include/ec.h new file mode 100644 index 0000000..c829b83 --- /dev/null +++ b/gc/include/ec.h @@ -0,0 +1,70 @@ +# ifndef EC_H +# define EC_H + +# ifndef CORD_H +#  include "cord.h" +# endif + +/* Extensible cords are strings that may be destructively appended to.	*/ +/* They allow fast construction of cords from characters that are	*/ +/* being read from a stream.						*/ +/* + * A client might look like: + * + *	{ + *	    CORD_ec x; + *	    CORD result; + *	    char c; + *	    FILE *f; + * + *	    ... + *	    CORD_ec_init(x); + *	    while(...) { + *		c = getc(f); + *		... + *		CORD_ec_append(x, c); + *	    } + *	    result = CORD_balance(CORD_ec_to_cord(x)); + * + * If a C string is desired as the final result, the call to CORD_balance + * may be replaced by a call to CORD_to_char_star. + */ + +# ifndef CORD_BUFSZ +#   define CORD_BUFSZ 128 +# endif + +typedef struct CORD_ec_struct { +    CORD ec_cord; +    char * ec_bufptr; +    char ec_buf[CORD_BUFSZ+1]; +} CORD_ec[1]; + +/* This structure represents the concatenation of ec_cord with		*/ +/* ec_buf[0 ... (ec_bufptr-ec_buf-1)]					*/ + +/* Flush the buffer part of the extended chord into ec_cord.	*/ +/* Note that this is almost the only real function, and it is	*/ +/* implemented in 6 lines in cordxtra.c				*/ +void CORD_ec_flush_buf(CORD_ec x); +       +/* Convert an extensible cord to a cord. */ +# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord) + +/* Initialize an extensible cord. */ +# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf) + +/* Append a character to an extensible cord.	*/ +# define CORD_ec_append(x, c) \ +    {  \ +	if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \ +	  	CORD_ec_flush_buf(x); \ +	} \ +	*((x)[0].ec_bufptr)++ = (c); \ +    } + +/* Append a cord to an extensible cord.  Structure remains shared with 	*/ +/* original.								*/ +void CORD_ec_append_cord(CORD_ec x, CORD s); + +# endif /* EC_H */ diff --git a/gc/include/gc.h b/gc/include/gc.h new file mode 100644 index 0000000..3061409 --- /dev/null +++ b/gc/include/gc.h @@ -0,0 +1,754 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. + * Copyright 1996 by Silicon Graphics.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +/* + * Note that this defines a large number of tuning hooks, which can + * safely be ignored in nearly all cases.  For normal use it suffices + * to call only GC_MALLOC and perhaps GC_REALLOC. + * For better performance, also look at GC_MALLOC_ATOMIC, and + * GC_enable_incremental.  
If you need an action to be performed + * immediately before an object is collected, look at GC_register_finalizer. + * If you are using Solaris threads, look at the end of this file. + * Everything else is best ignored unless you encounter performance + * problems. + */ +  +#ifndef _GC_H + +# define _GC_H +# define __GC +# include <stddef.h> + +#if defined(__CYGWIN32__) && defined(GC_USE_DLL) +#include "libgc_globals.h" +#endif + +#if defined(_MSC_VER) && defined(_DLL) +# ifdef GC_BUILD +#   define GC_API __declspec(dllexport) +# else +#   define GC_API __declspec(dllimport) +# endif +#endif + +#if defined(__WATCOMC__) && defined(GC_DLL) +# ifdef GC_BUILD +#   define GC_API extern __declspec(dllexport) +# else +#   define GC_API extern __declspec(dllimport) +# endif +#endif + +#ifndef GC_API +#define GC_API extern +#endif + +# if defined(__STDC__) || defined(__cplusplus) +#   define GC_PROTO(args) args +    typedef void * GC_PTR; +# else +#   define GC_PROTO(args) () +    typedef char * GC_PTR; +#  endif + +# ifdef __cplusplus +    extern "C" { +# endif + + +/* Define word and signed_word to be unsigned and signed types of the 	*/ +/* size as char * or void *.  There seems to be no way to do this	*/ +/* even semi-portably.  The following is probably no better/worse 	*/ +/* than almost anything else.						*/ +/* The ANSI standard suggests that size_t and ptr_diff_t might be 	*/ +/* better choices.  But those appear to have incorrect definitions	*/ +/* on may systems.  Notably "typedef int size_t" seems to be both	*/ +/* frequent and WRONG.							*/ +typedef unsigned long GC_word; +typedef long GC_signed_word; + +/* Public read-only variables */ + +GC_API GC_word GC_gc_no;/* Counter incremented per collection.  	*/ +			/* Includes empty GCs at startup.		*/ +			 + +/* Public R/W variables */ + +GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested)); +			/* When there is insufficient memory to satisfy */ +			/* an allocation request, we return		*/ +			/* (*GC_oom_fn)().  By default this just	*/ +			/* returns 0.					*/ +			/* If it returns, it must return 0 or a valid	*/ +			/* pointer to a previously allocated heap 	*/ +			/* object.					*/ + +GC_API int GC_find_leak; +			/* Do not actually garbage collect, but simply	*/ +			/* report inaccessible memory that was not	*/ +			/* deallocated with GC_free.  Initial value	*/ +			/* is determined by FIND_LEAK macro.		*/ + +GC_API int GC_quiet;	/* Disable statistics output.  Only matters if	*/ +			/* collector has been compiled with statistics	*/ +			/* enabled.  This involves a performance cost,	*/ +			/* and is thus not the default.			*/ + +GC_API int GC_finalize_on_demand; +			/* If nonzero, finalizers will only be run in 	*/ +			/* response to an eplit GC_invoke_finalizers	*/ +			/* call.  The default is determined by whether	*/ +			/* the FINALIZE_ON_DEMAND macro is defined	*/ +			/* when the collector is built.			*/ + +GC_API int GC_java_finalization; +			/* Mark objects reachable from finalizable 	*/ +			/* objects in a separate postpass.  This makes	*/ +			/* it a bit safer to use non-topologically-	*/ +			/* ordered finalization.  Default value is	*/ +			/* determined by JAVA_FINALIZATION macro.	*/ + +GC_API int GC_dont_gc;	/* Dont collect unless explicitly requested, e.g. */ +			/* because it's not safe.			  */ + +GC_API int GC_dont_expand; +			/* Dont expand heap unless explicitly requested */ +			/* or forced to.				*/ + +GC_API int GC_full_freq;    /* Number of partial collections between	*/ +			    /* full collections.  
Matters only if	*/ +			    /* GC_incremental is set.			*/ +			 +GC_API GC_word GC_non_gc_bytes; +			/* Bytes not considered candidates for collection. */ +			/* Used only to control scheduling of collections. */ + +GC_API GC_word GC_free_space_divisor; +			/* We try to make sure that we allocate at 	*/ +			/* least N/GC_free_space_divisor bytes between	*/ +			/* collections, where N is the heap size plus	*/ +			/* a rough estimate of the root set size.	*/ +			/* Initially, GC_free_space_divisor = 4.	*/ +			/* Increasing its value will use less space	*/ +			/* but more collection time.  Decreasing it	*/ +			/* will appreciably decrease collection time	*/ +			/* at the expense of space.			*/ +			/* GC_free_space_divisor = 1 will effectively	*/ +			/* disable collections.				*/ + +GC_API GC_word GC_max_retries; +			/* The maximum number of GCs attempted before	*/ +			/* reporting out of memory after heap		*/ +			/* expansion fails.  Initially 0.		*/ +			 + +GC_API char *GC_stackbottom;	/* Cool end of user stack.		*/ +				/* May be set in the client prior to	*/ +				/* calling any GC_ routines.  This	*/ +				/* avoids some overhead, and 		*/ +				/* potentially some signals that can 	*/ +				/* confuse debuggers.  Otherwise the	*/ +				/* collector attempts to set it 	*/ +				/* automatically.			*/ +				/* For multithreaded code, this is the	*/ +				/* cold end of the stack for the	*/ +				/* primordial thread.			*/ +				 +/* Public procedures */ +/* + * general purpose allocation routines, with roughly malloc calling conv. + * The atomic versions promise that no relevant pointers are contained + * in the object.  The nonatomic versions guarantee that the new object + * is cleared.  GC_malloc_stubborn promises that no changes to the object + * will occur after GC_end_stubborn_change has been called on the + * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object + * that is scanned for pointers to collectable objects, but is not itself + * collectable.  GC_malloc_uncollectable and GC_free called on the resulting + * object implicitly update GC_non_gc_bytes appropriately. + */ +GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes)); +GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes)); +GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes)); +GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes)); + +/* The following is only defined if the library has been suitably	*/ +/* compiled:								*/ +GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes)); + +/* Explicitly deallocate an object.  Dangerous if used incorrectly.     */ +/* Requires a pointer to the base of an object.				*/ +/* If the argument is stubborn, it should not be changeable when freed. */ +/* An object should not be enable for finalization when it is 		*/ +/* explicitly deallocated.						*/ +/* GC_free(0) is a no-op, as required by ANSI C for free.		*/ +GC_API void GC_free GC_PROTO((GC_PTR object_addr)); + +/* + * Stubborn objects may be changed only if the collector is explicitly informed. + * The collector is implicitly informed of coming change when such + * an object is first allocated.  The following routines inform the + * collector that an object will no longer be changed, or that it will + * once again be changed.  Only nonNIL pointer stores into the object + * are considered to be changes.  The argument to GC_end_stubborn_change + * must be exacly the value returned by GC_malloc_stubborn or passed to + * GC_change_stubborn.  
(In the second case it may be an interior pointer + * within 512 bytes of the beginning of the objects.) + * There is a performance penalty for allowing more than + * one stubborn object to be changed at once, but it is acceptable to + * do so.  The same applies to dropping stubborn objects that are still + * changeable. + */ +GC_API void GC_change_stubborn GC_PROTO((GC_PTR)); +GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR)); + +/* Return a pointer to the base (lowest address) of an object given	*/ +/* a pointer to a location within the object.				*/ +/* Return 0 if displaced_pointer doesn't point to within a valid	*/ +/* object.								*/ +GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer)); + +/* Given a pointer to the base of an object, return its size in bytes.	*/ +/* The returned size may be slightly larger than what was originally	*/ +/* requested.								*/ +GC_API size_t GC_size GC_PROTO((GC_PTR object_addr)); + +/* For compatibility with C library.  This is occasionally faster than	*/ +/* a malloc followed by a bcopy.  But if you rely on that, either here	*/ +/* or with the standard C library, your code is broken.  In my		*/ +/* opinion, it shouldn't have been invented, but now we're stuck. -HB	*/ +/* The resulting object has the same kind as the original.		*/ +/* If the argument is stubborn, the result will have changes enabled.	*/ +/* It is an error to have changes enabled for the original object.	*/ +/* Follows ANSI comventions for NULL old_object.			*/ +GC_API GC_PTR GC_realloc +	GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes)); +				    +/* Explicitly increase the heap size.	*/ +/* Returns 0 on failure, 1 on success.  */ +GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes)); + +/* Limit the heap size to n bytes.  Useful when you're debugging, 	*/ +/* especially on systems that don't handle running out of memory well.	*/ +/* n == 0 ==> unbounded.  This is the default.				*/ +GC_API void GC_set_max_heap_size GC_PROTO((GC_word n)); + +/* Inform the collector that a certain section of statically allocated	*/ +/* memory contains no pointers to garbage collected memory.  Thus it 	*/ +/* need not be scanned.  This is sometimes important if the application */ +/* maps large read/write files into the address space, which could be	*/ +/* mistaken for dynamic library data segments on some systems.		*/ +GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish)); + +/* Clear the set of root segments.  Wizards only. */ +GC_API void GC_clear_roots GC_PROTO((void)); + +/* Add a root segment.  Wizards only. */ +GC_API void GC_add_roots GC_PROTO((char * low_address, +				   char * high_address_plus_1)); + +/* Add a displacement to the set of those considered valid by the	*/ +/* collector.  GC_register_displacement(n) means that if p was returned */ +/* by GC_malloc, then (char *)p + n will be considered to be a valid	*/ +/* pointer to n.  N must be small and less than the size of p.		*/ +/* (All pointers to the interior of objects from the stack are		*/ +/* considered valid in any case.  This applies to heap objects and	*/ +/* static data.)							*/ +/* Preferably, this should be called before any other GC procedures.	*/ +/* Calling it later adds to the probability of excess memory		*/ +/* retention.								*/ +/* This is a no-op if the collector was compiled with recognition of	*/ +/* arbitrary interior pointers enabled, which is now the default.	
*/ +GC_API void GC_register_displacement GC_PROTO((GC_word n)); + +/* The following version should be used if any debugging allocation is	*/ +/* being done.								*/ +GC_API void GC_debug_register_displacement GC_PROTO((GC_word n)); + +/* Explicitly trigger a full, world-stop collection. 	*/ +GC_API void GC_gcollect GC_PROTO((void)); + +/* Trigger a full world-stopped collection.  Abort the collection if 	*/ +/* and when stop_func returns a nonzero value.  Stop_func will be 	*/ +/* called frequently, and should be reasonably fast.  This works even	*/ +/* if virtual dirty bits, and hence incremental collection is not 	*/ +/* available for this architecture.  Collections can be aborted faster	*/ +/* than normal pause times for incremental collection.  However,	*/ +/* aborted collections do no useful work; the next collection needs	*/ +/* to start from the beginning.						*/ +/* Return 0 if the collection was aborted, 1 if it succeeded.		*/ +typedef int (* GC_stop_func) GC_PROTO((void)); +GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func)); + +/* Return the number of bytes in the heap.  Excludes collector private	*/ +/* data structures.  Includes empty blocks and fragmentation loss.	*/ +/* Includes some pages that were allocated but never written.		*/ +GC_API size_t GC_get_heap_size GC_PROTO((void)); + +/* Return the number of bytes allocated since the last collection.	*/ +GC_API size_t GC_get_bytes_since_gc GC_PROTO((void)); + +/* Enable incremental/generational collection.	*/ +/* Not advisable unless dirty bits are 		*/ +/* available or most heap objects are		*/ +/* pointerfree(atomic) or immutable.		*/ +/* Don't use in leak finding mode.		*/ +/* Ignored if GC_dont_gc is true.		*/ +GC_API void GC_enable_incremental GC_PROTO((void)); + +/* Perform some garbage collection work, if appropriate.	*/ +/* Return 0 if there is no more work to be done.		*/ +/* Typically performs an amount of work corresponding roughly	*/ +/* to marking from one page.  May do more work if further	*/ +/* progress requires it, e.g. if incremental collection is	*/ +/* disabled.  It is reasonable to call this in a wait loop	*/ +/* until it returns 0.						*/ +GC_API int GC_collect_a_little GC_PROTO((void)); + +/* Allocate an object of size lb bytes.  The client guarantees that	*/ +/* as long as the object is live, it will be referenced by a pointer	*/ +/* that points to somewhere within the first 256 bytes of the object.	*/ +/* (This should normally be declared volatile to prevent the compiler	*/ +/* from invalidating this assertion.)  This routine is only useful	*/ +/* if a large array is being allocated.  It reduces the chance of 	*/ +/* accidentally retaining such an array as a result of scanning an	*/ +/* integer that happens to be an address inside the array.  (Actually,	*/ +/* it reduces the chance of the allocator not finding space for such	*/ +/* an array, since it will try hard to avoid introducing such a false	*/ +/* reference.)  On a SunOS 4.X or MS Windows system this is recommended */ +/* for arrays likely to be larger than 100K or so.  For other systems,	*/ +/* or if the collector is not configured to recognize all interior	*/ +/* pointers, the threshold is normally much higher.			
*/ +GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb)); +GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb)); + +#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720 +#   define GC_ADD_CALLER +#   define GC_RETURN_ADDR (GC_word)__return_address +#endif + +#ifdef GC_ADD_CALLER +#  define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__ +#  define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int +#else +#  define GC_EXTRAS __FILE__, __LINE__ +#  define GC_EXTRA_PARAMS char * descr_string, int descr_int +#endif + +/* Debugging (annotated) allocation.  GC_gcollect will check 		*/ +/* objects allocated in this way for overwrites, etc.			*/ +GC_API GC_PTR GC_debug_malloc +	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); +GC_API GC_PTR GC_debug_malloc_atomic +	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); +GC_API GC_PTR GC_debug_malloc_uncollectable +	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); +GC_API GC_PTR GC_debug_malloc_stubborn +	GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS)); +GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr)); +GC_API GC_PTR GC_debug_realloc +	GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes, +  		  GC_EXTRA_PARAMS)); +  			 	  +GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR)); +GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR)); +# ifdef GC_DEBUG +#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS) +#   define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS) +#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \ +							GC_EXTRAS) +#   define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS) +#   define GC_FREE(p) GC_debug_free(p) +#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \ +	GC_debug_register_finalizer(p, f, d, of, od) +#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \ +	GC_debug_register_finalizer_ignore_self(p, f, d, of, od) +#   define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS); +#   define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p) +#   define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p) +#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \ +	GC_general_register_disappearing_link(link, GC_base(obj)) +#   define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n) +# else +#   define GC_MALLOC(sz) GC_malloc(sz) +#   define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz) +#   define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz) +#   define GC_REALLOC(old, sz) GC_realloc(old, sz) +#   define GC_FREE(p) GC_free(p) +#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \ +	GC_register_finalizer(p, f, d, of, od) +#   define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \ +	GC_register_finalizer_ignore_self(p, f, d, of, od) +#   define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz) +#   define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p) +#   define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p) +#   define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \ +	GC_general_register_disappearing_link(link, obj) +#   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n) +# endif +/* The following are included because they are often convenient, and	*/ +/* reduce the chance for a misspecifed size argument.  But calls may	*/ +/* expand to something syntactically incorrect if t is a complicated	*/ +/* type expression.  							
*/
+# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
+# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
+# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
+
+/* Finalization.  Some of these primitives are grossly unsafe.    */
+/* The idea is to make them both cheap, and sufficient to build   */
+/* a safer layer, closer to PCedar finalization.                  */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes,        */
+/* Christian Jacobi, and Russ Atkinson.  It's not perfect, and    */
+/* probably nobody else agrees with it.    Hans-J. Boehm  3/13/92 */
+typedef void (*GC_finalization_proc)
+	GC_PROTO((GC_PTR obj, GC_PTR client_data));
+
+GC_API void GC_register_finalizer
+	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+		  GC_finalization_proc *ofn, GC_PTR *ocd));
+GC_API void GC_debug_register_finalizer
+	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+		  GC_finalization_proc *ofn, GC_PTR *ocd));
+	/* When obj is no longer accessible, invoke             */
+	/* (*fn)(obj, cd).  If a and b are inaccessible, and    */
+	/* a points to b (after disappearing links have been    */
+	/* made to disappear), then only a will be              */
+	/* finalized.  (If this does not create any new         */
+	/* pointers to b, then b will be finalized after the    */
+	/* next collection.)  Any finalizable object that       */
+	/* is reachable from itself by following one or more    */
+	/* pointers will not be finalized (or collected).       */
+	/* Thus cycles involving finalizable objects should     */
+	/* be avoided, or broken by disappearing links.         */
+	/* All but the last finalizer registered for an object  */
+	/* are ignored.                                          */
+	/* Finalization may be removed by passing 0 as fn.      */
+	/* Finalizers are implicitly unregistered just before   */
+	/* they are invoked.                                     */
+	/* The old finalizer and client data are stored in      */
+	/* *ofn and *ocd.                                        */
+	/* Fn is never invoked on an accessible object,         */
+	/* provided hidden pointers are converted to real       */
+	/* pointers only if the allocation lock is held, and    */
+	/* such conversions are not performed by finalization   */
+	/* routines.                                             */
+	/* If GC_register_finalizer is aborted as a result of   */
+	/* a signal, the object may be left with no             */
+	/* finalization, even if neither the old nor new        */
+	/* finalizer were NULL.                                  */
+	/* Obj should be the nonNULL starting address of an     */
+	/* object allocated by GC_malloc or friends.            */
+	/* Note that any garbage collectable object referenced  */
+	/* by cd will be considered accessible until the        */
+	/* finalizer is invoked.                                 */
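A sketch of typical finalizer registration with the declarations above (not part of the commit; the struct and the names `node`, `node_finalizer`, and `make_node` are invented, and GC_NEW/GC_REGISTER_FINALIZER expand to the non-debug variants unless GC_DEBUG is defined):

```c
#include <stdio.h>
#include "gc.h"

struct node { struct node *next; int value; };

/* GC_finalization_proc: invoked at most once, some time after the
   node has become unreachable. */
static void node_finalizer(GC_PTR obj, GC_PTR client_data)
{
    (void)client_data;
    printf("node %p finalized\n", obj);
}

struct node *make_node(int value)
{
    /* GC_NEW expands to (struct node *)GC_malloc(sizeof (struct node))
       in a non-debug build; the object is cleared on allocation. */
    struct node *p = GC_NEW(struct node);
    p->value = value;
    /* No interest in any previously registered finalizer, so pass 0
       for the ofn/ocd out-parameters. */
    GC_REGISTER_FINALIZER(p, node_finalizer, 0, 0, 0);
    return p;
}
```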
+/* Another version of the above follows.  It ignores             */
+/* self-cycles, i.e. pointers from a finalizable object to       */
+/* itself.  There is a stylistic argument that this is wrong,    */
+/* but it's unavoidable for C++, since the compiler may          */
+/* silently introduce these.  It's also benign in that specific  */
+/* case.                                                          */
+GC_API void GC_register_finalizer_ignore_self
+	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+		  GC_finalization_proc *ofn, GC_PTR *ocd));
+GC_API void GC_debug_register_finalizer_ignore_self
+	GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+		  GC_finalization_proc *ofn, GC_PTR *ocd));
+
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable      */
+/* objects to be finalized in the correct order.  Standard   */
+/* use involves calling GC_register_disappearing_link(&p),   */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining         */
+/* finalization order.                                        */
+GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
+	/* Link should point to a field of a heap allocated     */
+	/* object obj.  *link will be cleared when obj is       */
+	/* found to be inaccessible.  This happens BEFORE any   */
+	/* finalization code is invoked, and BEFORE any         */
+	/* decisions about finalization order are made.         */
+	/* This is useful in telling the finalizer that         */
+	/* some pointers are not essential for proper           */
+	/* finalization.  This may avoid finalization cycles.   */
+	/* Note that obj may be resurrected by another          */
+	/* finalizer, and thus the clearing of *link may        */
+	/* be visible to non-finalization code.                 */
+	/* There's an argument that an arbitrary action should  */
+	/* be allowed here, instead of just clearing a pointer. */
+	/* But this causes problems if that action alters, or   */
+	/* examines connectivity.                                */
+	/* Returns 1 if link was already registered, 0          */
+	/* otherwise.                                            */
+	/* Only exists for backward compatibility.  See below:  */
+
+GC_API int GC_general_register_disappearing_link
+	GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
+	/* A slight generalization of the above.  *link is      */
+	/* cleared when obj first becomes inaccessible.  This   */
+	/* can be used to implement weak pointers easily and    */
+	/* safely.  Typically link will point to a location     */
+	/* holding a disguised pointer to obj.  (A pointer      */
+	/* inside an "atomic" object is effectively             */
+	/* disguised.)  In this way soft                         */
+	/* pointers are broken before any object                */
+	/* reachable from them is finalized.  Each link         */
+	/* may be registered only once, i.e. with one obj       */
+	/* value.  This was added after a long email discussion */
+	/* with John Ellis.                                      */
+	/* Obj must be a pointer to the first word of an object */
+	/* we allocated.  It is unsafe to explicitly deallocate */
+	/* the object containing link.  Explicitly deallocating */
+	/* obj may or may not cause link to eventually be       */
+	/* cleared.                                              */
+GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
+	/* Returns 0 if link was not actually registered.       */
+	/* Undoes a registration by either of the above two     */
+	/* routines.                                             */
+
+/* Auxiliary fns to make finalization work correctly with displaced */
+/* pointers introduced by the debugging allocators.                  */
+GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
+GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+
+GC_API int GC_invoke_finalizers GC_PROTO((void));
+	/* Run finalizers for all objects that are ready to */
+	/* be finalized.  Return the number of finalizers   */
+	/* that were run.  Normally this is also called     */
+	/* implicitly during some allocations.
If		*/ +	/* GC-finalize_on_demand is nonzero, it must be called	*/ +	/* explicitly.						*/ + +/* GC_set_warn_proc can be used to redirect or filter warning messages.	*/ +/* p may not be a NULL pointer.						*/ +typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg)); +GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p)); +    /* Returns old warning procedure.	*/ +	 +/* The following is intended to be used by a higher level	*/ +/* (e.g. cedar-like) finalization facility.  It is expected	*/ +/* that finalization code will arrange for hidden pointers to	*/ +/* disappear.  Otherwise objects can be accessed after they	*/ +/* have been collected.						*/ +/* Note that putting pointers in atomic objects or in 		*/ +/* nonpointer slots of "typed" objects is equivalent to 	*/ +/* disguising them in this way, and may have other advantages.	*/ +# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS) +    typedef GC_word GC_hidden_pointer; +#   define HIDE_POINTER(p) (~(GC_hidden_pointer)(p)) +#   define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p))) +    /* Converting a hidden pointer to a real pointer requires verifying	*/ +    /* that the object still exists.  This involves acquiring the  	*/ +    /* allocator lock to avoid a race with the collector.		*/ +# endif /* I_HIDE_POINTERS */ + +typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data)); +GC_API GC_PTR GC_call_with_alloc_lock +        	GC_PROTO((GC_fn_type fn, GC_PTR client_data)); + +/* Check that p and q point to the same object.  		*/ +/* Fail conspicuously if they don't.				*/ +/* Returns the first argument.  				*/ +/* Succeeds if neither p nor q points to the heap.		*/ +/* May succeed if both p and q point to between heap objects.	*/ +GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q)); + +/* Checked pointer pre- and post- increment operations.  Note that	*/ +/* the second argument is in units of bytes, not multiples of the	*/ +/* object size.  This should either be invoked from a macro, or the	*/ +/* call should be automatically generated.				*/ +GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much)); +GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much)); + +/* Check that p is visible						*/ +/* to the collector as a possibly pointer containing location.		*/ +/* If it isn't fail conspicuously.					*/ +/* Returns the argument in all cases.  May erroneously succeed		*/ +/* in hard cases.  (This is intended for debugging use with		*/ +/* untyped allocations.  The idea is that it should be possible, though	*/ +/* slow, to add such a call to all indirect pointer stores.)		*/ +/* Currently useless for multithreaded worlds.				*/ +GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p)); + +/* Check that if p is a pointer to a heap page, then it points to	*/ +/* a valid displacement within a heap object.				*/ +/* Fail conspicuously if this property does not hold.			*/ +/* Uninteresting with ALL_INTERIOR_POINTERS.				*/ +/* Always returns its argument.						*/ +GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR	p)); + +/* Safer, but slow, pointer addition.  Probably useful mainly with 	*/ +/* a preprocessor.  Useful only for heap pointers.			
*/ +#ifdef GC_DEBUG +#   define GC_PTR_ADD3(x, n, type_of_result) \ +	((type_of_result)GC_same_obj((x)+(n), (x))) +#   define GC_PRE_INCR3(x, n, type_of_result) \ +	((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)) +#   define GC_POST_INCR2(x, type_of_result) \ +	((type_of_result)GC_post_incr(&(x), sizeof(*x)) +#   ifdef __GNUC__ +#       define GC_PTR_ADD(x, n) \ +	    GC_PTR_ADD3(x, n, typeof(x)) +#   define GC_PRE_INCR(x, n) \ +	    GC_PRE_INCR3(x, n, typeof(x)) +#   define GC_POST_INCR(x, n) \ +	    GC_POST_INCR3(x, typeof(x)) +#   else +	/* We can't do this right without typeof, which ANSI	*/ +	/* decided was not sufficiently useful.  Repeatedly	*/ +	/* mentioning the arguments seems too dangerous to be	*/ +	/* useful.  So does not casting the result.		*/ +#   	define GC_PTR_ADD(x, n) ((x)+(n)) +#   endif +#else	/* !GC_DEBUG */ +#   define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n)) +#   define GC_PTR_ADD(x, n) ((x)+(n)) +#   define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n)) +#   define GC_PRE_INCR(x, n) ((x) += (n)) +#   define GC_POST_INCR2(x, n, type_of_result) ((x)++) +#   define GC_POST_INCR(x, n) ((x)++) +#endif + +/* Safer assignment of a pointer to a nonstack location.	*/ +#ifdef GC_DEBUG +# ifdef __STDC__ +#   define GC_PTR_STORE(p, q) \ +	(*(void **)GC_is_visible(p) = GC_is_valid_displacement(q)) +# else +#   define GC_PTR_STORE(p, q) \ +	(*(char **)GC_is_visible(p) = GC_is_valid_displacement(q)) +# endif +#else /* !GC_DEBUG */ +#   define GC_PTR_STORE(p, q) *((p) = (q)) +#endif + +/* Fynctions called to report pointer checking errors */ +GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q)); + +GC_API void (*GC_is_valid_displacement_print_proc) +	GC_PROTO((GC_PTR p)); + +GC_API void (*GC_is_visible_print_proc) +	GC_PROTO((GC_PTR p)); + +#if defined(_SOLARIS_PTHREADS) && !defined(SOLARIS_THREADS) +#   define SOLARIS_THREADS +#endif + +#ifdef SOLARIS_THREADS +/* We need to intercept calls to many of the threads primitives, so 	*/ +/* that we can locate thread stacks and stop the world.			*/ +/* Note also that the collector cannot see thread specific data.	*/ +/* Thread specific data should generally consist of pointers to		*/ +/* uncollectable objects, which are deallocated using the destructor	*/ +/* facility in thr_keycreate.						*/ +# include <thread.h> +# include <signal.h> +  int GC_thr_create(void *stack_base, size_t stack_size, +                    void *(*start_routine)(void *), void *arg, long flags, +                    thread_t *new_thread); +  int GC_thr_join(thread_t wait_for, thread_t *departed, void **status); +  int GC_thr_suspend(thread_t target_thread); +  int GC_thr_continue(thread_t target_thread); +  void * GC_dlopen(const char *path, int mode); + +# ifdef _SOLARIS_PTHREADS +#   include <pthread.h> +    extern int GC_pthread_create(pthread_t *new_thread, +    			         const pthread_attr_t *attr, +          			 void * (*thread_execp)(void *), void *arg); +    extern int GC_pthread_join(pthread_t wait_for, void **status); + +#   undef thread_t + +#   define pthread_join GC_pthread_join +#   define pthread_create GC_pthread_create +#endif + +# define thr_create GC_thr_create +# define thr_join GC_thr_join +# define thr_suspend GC_thr_suspend +# define thr_continue GC_thr_continue +# define dlopen GC_dlopen + +# endif /* SOLARIS_THREADS */ + + +#if defined(IRIX_THREADS) || defined(LINUX_THREADS) +/* We treat these similarly. 
*/ +# include <pthread.h> +# include <signal.h> + +  int GC_pthread_create(pthread_t *new_thread, +                        const pthread_attr_t *attr, +		        void *(*start_routine)(void *), void *arg); +  int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset); +  int GC_pthread_join(pthread_t thread, void **retval); + +# define pthread_create GC_pthread_create +# define pthread_sigmask GC_pthread_sigmask +# define pthread_join GC_pthread_join + +#endif /* IRIX_THREADS || LINUX_THREADS */ + +# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \ +	defined(IRIX_THREADS) || defined(LINUX_THREADS) || \ +	defined(IRIX_JDK_THREADS) +   	/* Any flavor of threads except SRC_M3.	*/ +/* This returns a list of objects, linked through their first		*/ +/* word.  Its use can greatly reduce lock contention problems, since	*/ +/* the allocation lock can be acquired and released many fewer times.	*/ +/* lb must be large enough to hold the pointer field.			*/ +GC_PTR GC_malloc_many(size_t lb); +#define GC_NEXT(p) (*(GC_PTR *)(p)) 	/* Retrieve the next element	*/ +					/* in returned list.		*/ +extern void GC_thr_init();	/* Needed for Solaris/X86	*/ + +#endif /* THREADS && !SRC_M3 */ + +/* + * If you are planning on putting + * the collector in a SunOS 5 dynamic library, you need to call GC_INIT() + * from the statically loaded program section. + * This circumvents a Solaris 2.X (X<=4) linker bug. + */ +#if defined(sparc) || defined(__sparc) +#   define GC_INIT() { extern end, etext; \ +		       GC_noop(&end, &etext); } +#else +# if defined(__CYGWIN32__) && defined(GC_USE_DLL) +    /* +     * Similarly gnu-win32 DLLs need explicit initialization +     */ +#   define GC_INIT() { GC_add_roots(DATASTART, DATAEND); } +# else +#   define GC_INIT() +# endif +#endif + +#if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \ +     || defined(_WIN32) +  /* win32S may not free all resources on process exit.  */ +  /* This explicitly deallocates the heap.		 */ +    GC_API void GC_win32_free_heap (); +#endif + +#ifdef __cplusplus +    }  /* end of extern "C" */ +#endif + +#endif /* _GC_H */ diff --git a/gc/include/gc_alloc.h b/gc/include/gc_alloc.h new file mode 100644 index 0000000..1f1d54a --- /dev/null +++ b/gc/include/gc_alloc.h @@ -0,0 +1,380 @@ +/* + * Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ + +// +// This is a C++ header file that is intended to replace the SGI STL +// alloc.h.  This assumes SGI STL version < 3.0. +// +// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE +// and -DALL_INTERIOR_POINTERS.  We also recommend +// -DREDIRECT_MALLOC=GC_uncollectable_malloc. +// +// Some of this could be faster in the explicit deallocation case.  In particular, +// we spend too much time clearing objects on the free lists.  That could be avoided. +// +// This uses template classes with static members, and hence does not work +// with g++ 2.7.2 and earlier. 
+// + +#include "gc.h" + +#ifndef GC_ALLOC_H + +#define GC_ALLOC_H +#define __ALLOC_H	// Prevent inclusion of the default version.  Ugly. +#define __SGI_STL_ALLOC_H +#define __SGI_STL_INTERNAL_ALLOC_H + +#ifndef __ALLOC +#   define __ALLOC alloc +#endif + +#include <stddef.h> +#include <string.h> + +// The following is just replicated from the conventional SGI alloc.h: + +template<class T, class alloc> +class simple_alloc { + +public: +    static T *allocate(size_t n) +                { return 0 == n? 0 : (T*) alloc::allocate(n * sizeof (T)); } +    static T *allocate(void) +                { return (T*) alloc::allocate(sizeof (T)); } +    static void deallocate(T *p, size_t n) +                { if (0 != n) alloc::deallocate(p, n * sizeof (T)); } +    static void deallocate(T *p) +                { alloc::deallocate(p, sizeof (T)); } +}; + +#include "gc.h" + +// The following need to match collector data structures. +// We can't include gc_priv.h, since that pulls in way too much stuff. +// This should eventually be factored out into another include file. + +extern "C" { +    extern void ** const GC_objfreelist_ptr; +    extern void ** const GC_aobjfreelist_ptr; +    extern void ** const GC_uobjfreelist_ptr; +    extern void ** const GC_auobjfreelist_ptr; + +    extern void GC_incr_words_allocd(size_t words); +    extern void GC_incr_mem_freed(size_t words); + +    extern char * GC_generic_malloc_words_small(size_t word, int kind); +} + +// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and +// AUNCOLLECTABLE in gc_priv.h. + +enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2, +       GC_AUNCOLLECTABLE = 3 }; + +enum { GC_max_fast_bytes = 255 }; + +enum { GC_bytes_per_word = sizeof(char *) }; + +enum { GC_byte_alignment = 8 }; + +enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word }; + +inline void * &GC_obj_link(void * p) +{   return *(void **)p;  } + +// Compute a number of words >= n+1 bytes. +// The +1 allows for pointers one past the end. +inline size_t GC_round_up(size_t n) +{ +    return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment; +} + +// The same but don't allow for extra byte. +inline size_t GC_round_up_uncollectable(size_t n) +{ +    return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment; +} + +template <int dummy> +class GC_aux_template { +public: +  // File local count of allocated words.  Occasionally this is +  // added into the global count.  A separate count is necessary since the +  // real one must be updated with a procedure call. +  static size_t GC_words_recently_allocd; + +  // Same for uncollectable mmory.  Not yet reflected in either +  // GC_words_recently_allocd or GC_non_gc_bytes. +  static size_t GC_uncollectable_words_recently_allocd; + +  // Similar counter for explicitly deallocated memory. +  static size_t GC_mem_recently_freed; + +  // Again for uncollectable memory. 
+  static size_t GC_uncollectable_mem_recently_freed; + +  static void * GC_out_of_line_malloc(size_t nwords, int kind); +}; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0; + +template <int dummy> +void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind) +{ +    GC_words_recently_allocd += GC_uncollectable_words_recently_allocd; +    GC_non_gc_bytes += +                GC_bytes_per_word * GC_uncollectable_words_recently_allocd; +    GC_uncollectable_words_recently_allocd = 0; + +    GC_mem_recently_freed += GC_uncollectable_mem_recently_freed; +    GC_non_gc_bytes -=  +                GC_bytes_per_word * GC_uncollectable_mem_recently_freed; +    GC_uncollectable_mem_recently_freed = 0; + +    GC_incr_words_allocd(GC_words_recently_allocd); +    GC_words_recently_allocd = 0; + +    GC_incr_mem_freed(GC_mem_recently_freed); +    GC_mem_recently_freed = 0; + +    return GC_generic_malloc_words_small(nwords, kind); +} + +typedef GC_aux_template<0> GC_aux; + +// A fast, single-threaded, garbage-collected allocator +// We assume the first word will be immediately overwritten. +// In this version, deallocation is not a noop, and explicit +// deallocation is likely to help performance. +template <int dummy> +class single_client_gc_alloc_template { +    public: +     	static void * allocate(size_t n) +        { +	    size_t nwords = GC_round_up(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc(n); +	    flh = GC_objfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_words_recently_allocd += nwords; +	    return op; +        } +     	static void * ptr_free_allocate(size_t n) +        { +	    size_t nwords = GC_round_up(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_atomic(n); +	    flh = GC_aobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_words_recently_allocd += nwords; +	    return op; +        } +	static void deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes)  { +		GC_free(p); +	    } else { +	        flh = GC_objfreelist_ptr + nwords; +	        GC_obj_link(p) = *flh; +		memset((char *)p + GC_bytes_per_word, 0, +		       GC_bytes_per_word * (nwords - 1)); +	        *flh = p; +	        GC_aux::GC_mem_recently_freed += nwords; +	    } +	} +	static void ptr_free_deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes) { +		GC_free(p); +	    } else { +	    	flh = GC_aobjfreelist_ptr + nwords; +	    	GC_obj_link(p) = *flh; +	    	*flh = p; +	    	GC_aux::GC_mem_recently_freed += nwords; +	    } +	} +}; + +typedef single_client_gc_alloc_template<0> single_client_gc_alloc; + +// Once more, for uncollectable objects. 
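+// The next template mirrors single_client_gc_alloc, but allocates
+// uncollectable memory, so every object obtained from it must be
+// explicitly deallocated.  A minimal sketch (sizes arbitrary):
+//
+//	void * q = single_client_alloc::allocate(sizeof(int));
+//	/* ... q is never reclaimed automatically ... */
+//	single_client_alloc::deallocate(q, sizeof(int));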
+template <int dummy> +class single_client_alloc_template { +    public: +     	static void * allocate(size_t n) +        { +	    size_t nwords = GC_round_up_uncollectable(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n); +	    flh = GC_uobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_uncollectable_words_recently_allocd += nwords; +	    return op; +        } +     	static void * ptr_free_allocate(size_t n) +        { +	    size_t nwords = GC_round_up_uncollectable(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n); +	    flh = GC_auobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_uncollectable_words_recently_allocd += nwords; +	    return op; +        } +	static void deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up_uncollectable(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes)  { +		GC_free(p); +	    } else { +	        flh = GC_uobjfreelist_ptr + nwords; +	        GC_obj_link(p) = *flh; +	        *flh = p; +	        GC_aux::GC_uncollectable_mem_recently_freed += nwords; +	    } +	} +	static void ptr_free_deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up_uncollectable(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes) { +		GC_free(p); +	    } else { +	    	flh = GC_auobjfreelist_ptr + nwords; +	    	GC_obj_link(p) = *flh; +	    	*flh = p; +	    	GC_aux::GC_uncollectable_mem_recently_freed += nwords; +	    } +	} +}; + +typedef single_client_alloc_template<0> single_client_alloc; + +template < int dummy > +class gc_alloc_template { +    public: +     	static void * allocate(size_t n) { return GC_malloc(n); } +     	static void * ptr_free_allocate(size_t n) +		{ return GC_malloc_atomic(n); } +	static void deallocate(void *, size_t) { } +	static void ptr_free_deallocate(void *, size_t) { } +}; + +typedef gc_alloc_template < 0 > gc_alloc; + +template < int dummy > +class alloc_template { +    public: +     	static void * allocate(size_t n) { return GC_malloc_uncollectable(n); } +     	static void * ptr_free_allocate(size_t n) +		{ return GC_malloc_atomic_uncollectable(n); } +	static void deallocate(void *p, size_t) { GC_free(p); } +	static void ptr_free_deallocate(void *p, size_t) { GC_free(p); } +}; + +typedef alloc_template < 0 > alloc; + +#ifdef _SGI_SOURCE + +// We want to specialize simple_alloc so that it does the right thing +// for all pointerfree types.  At the moment there is no portable way to +// even approximate that.  The following approximation should work for +// SGI compilers, and perhaps some others. + +# define __GC_SPECIALIZE(T,alloc) \ +class simple_alloc<T, alloc> { \ +public: \ +    static T *allocate(size_t n) \ +	{ return 0 == n? 
0 : \
+			 (T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
+    static T *allocate(void) \
+	{ return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
+    static void deallocate(T *p, size_t n) \
+	{ if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
+    static void deallocate(T *p) \
+	{ alloc::ptr_free_deallocate(p, sizeof (T)); } \
+};
+
+__GC_SPECIALIZE(char, gc_alloc)
+__GC_SPECIALIZE(int, gc_alloc)
+__GC_SPECIALIZE(unsigned, gc_alloc)
+__GC_SPECIALIZE(float, gc_alloc)
+__GC_SPECIALIZE(double, gc_alloc)
+
+__GC_SPECIALIZE(char, alloc)
+__GC_SPECIALIZE(int, alloc)
+__GC_SPECIALIZE(unsigned, alloc)
+__GC_SPECIALIZE(float, alloc)
+__GC_SPECIALIZE(double, alloc)
+
+__GC_SPECIALIZE(char, single_client_gc_alloc)
+__GC_SPECIALIZE(int, single_client_gc_alloc)
+__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
+__GC_SPECIALIZE(float, single_client_gc_alloc)
+__GC_SPECIALIZE(double, single_client_gc_alloc)
+
+__GC_SPECIALIZE(char, single_client_alloc)
+__GC_SPECIALIZE(int, single_client_alloc)
+__GC_SPECIALIZE(unsigned, single_client_alloc)
+__GC_SPECIALIZE(float, single_client_alloc)
+__GC_SPECIALIZE(double, single_client_alloc)
+
+#ifdef __STL_USE_STD_ALLOCATORS
+
+// TODO: copy stuff from stl_alloc.h or move it to a different file.
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
+#endif /* _SGI_SOURCE */
+
+#endif /* GC_ALLOC_H */
diff --git a/gc/include/gc_cpp.h b/gc/include/gc_cpp.h
new file mode 100644
index 0000000..ad7df5d
--- /dev/null
+++ b/gc/include/gc_cpp.h
@@ -0,0 +1,290 @@
+#ifndef GC_CPP_H
+#define GC_CPP_H
+/****************************************************************************
+Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ 
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ 
+Permission is hereby granted to use or copy this program for any
+purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is
+granted, provided the above notices are retained, and a notice that
+the code was modified is included with the above copyright notice.
+****************************************************************************
+
+C++ Interface to the Boehm Collector
+
+    John R. Ellis and Jesse Hull 
+    Last modified on Mon Jul 24 15:43:42 PDT 1995 by ellis
+
+This interface provides access to the Boehm collector.  It provides
+basic facilities similar to those described in "Safe, Efficient
+Garbage Collection for C++", by John R. Ellis and David L. Detlefs
+(ftp.parc.xerox.com:/pub/ellis/gc).
+
+All heap-allocated objects are either "collectable" or
+"uncollectable".  Programs must explicitly delete uncollectable
+objects, whereas the garbage collector will automatically delete
+collectable objects when it discovers them to be inaccessible.
+Collectable objects may freely point at uncollectable objects and vice
+versa.
+
+Objects allocated with the built-in "::operator new" are uncollectable.
+
+Objects derived from class "gc" are collectable.  For example:
+
+    class A: public gc {...};
+    A* a = new A;       // a is collectable. 
+
+Collectable instances of non-class types can be allocated using the GC
+placement:
+
+    typedef int A[ 10 ];
+    A* a = new (GC) A;
+
+Uncollectable instances of classes derived from "gc" can be allocated
+using the NoGC placement:
+
+    class A: public gc {...};
+    A* a = new (NoGC) A;   // a is uncollectable. 
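+
+There is also a PointerFreeGC placement for collectable objects that
+are known to contain no pointers; such objects are allocated with
+GC_MALLOC_ATOMIC (see the implementation of "operator new" below) and
+their contents are never scanned by the collector.  For example
+(an illustrative sketch):
+
+    typedef char Buffer[ 1024 ];
+    Buffer* b = new (PointerFreeGC) Buffer;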
+
+Both uncollectable and collectable objects can be explicitly deleted
+with "delete", which invokes an object's destructors and frees its
+storage immediately.
+
+A collectable object may have a clean-up function, which will be
+invoked when the collector discovers the object to be inaccessible.
+An object derived from "gc_cleanup" or containing a member derived
+from "gc_cleanup" has a default clean-up function that invokes the
+object's destructors.  Explicit clean-up functions may be specified as
+an additional placement argument:
+
+    A* a = ::new (GC, MyCleanup) A;
+
+An object is considered "accessible" by the collector if it can be
+reached by a path of pointers from static variables, automatic
+variables of active functions, or from some object with clean-up
+enabled; pointers from an object to itself are ignored.
+
+Thus, if objects A and B both have clean-up functions, and A points at
+B, B is considered accessible.  After A's clean-up is invoked and its
+storage released, B will then become inaccessible and will have its
+clean-up invoked.  If A points at B and B points to A, forming a
+cycle, then that's considered a storage leak, and neither will be
+collectable.  See the interface gc.h for low-level facilities for
+handling such cycles of objects with clean-up.
+
+The collector cannot guarantee that it will find all inaccessible
+objects.  In practice, it finds almost all of them.
+
+
+Cautions:
+
+1. Be sure the collector has been augmented with "make c++".
+
+2. If your compiler supports the new "operator new[]" syntax, then
+add -DOPERATOR_NEW_ARRAY to the Makefile.
+
+If your compiler doesn't support "operator new[]", beware that an
+array of type T, where T is derived from "gc", may or may not be
+allocated as a collectable object (it depends on the compiler).  Use
+the explicit GC placement to make the array collectable.  For example:
+
+    class A: public gc {...};
+    A* a1 = new A[ 10 ];        // collectable or uncollectable?
+    A* a2 = new (GC) A[ 10 ];   // collectable
+
+3. The destructors of collectable arrays of objects derived from
+"gc_cleanup" will not be invoked properly.  For example:
+
+    class A: public gc_cleanup {...};
+    A* a = new (GC) A[ 10 ];    // destructors not invoked correctly
+
+Typically, only the destructor for the first element of the array will
+be invoked when the array is garbage-collected.  To get all the
+destructors of any array executed, you must supply an explicit
+clean-up function:
+
+    A* a = new (GC, MyCleanUp) A[ 10 ];
+
+(Implementing clean-up of arrays correctly, portably, and in a way
+that preserves the correct exception semantics requires a language
+extension, e.g. the "gc" keyword.)
+
+4. Compiler bugs:
+
+* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
+destructors of classes derived from gc_cleanup won't be invoked.
+You'll have to explicitly register a clean-up function with
+new-placement syntax.
+
+* Evidently cfront 3.0 does not allow destructors to be explicitly
+invoked using the ANSI-conforming syntax t->~T().  If you're using
+cfront 3.0, you'll have to comment out the class gc_cleanup, which
+uses explicit invocation.
+
+****************************************************************************/
+
+#include "gc.h"
+
+#ifndef THINK_CPLUS
+#define _cdecl
+#endif
+
+#if ! 
defined( OPERATOR_NEW_ARRAY ) \ +    && (__BORLANDC__ >= 0x450 || (__GNUC__ >= 2 && __GNUC_MINOR__ >= 6) \ +        || __WATCOMC__ >= 1050) +#   define OPERATOR_NEW_ARRAY +#endif + +enum GCPlacement {GC, NoGC, PointerFreeGC}; + +class gc {public: +    inline void* operator new( size_t size ); +    inline void* operator new( size_t size, GCPlacement gcp ); +    inline void operator delete( void* obj ); + +#ifdef OPERATOR_NEW_ARRAY +    inline void* operator new[]( size_t size ); +    inline void* operator new[]( size_t size, GCPlacement gcp ); +    inline void operator delete[]( void* obj ); +#endif /* OPERATOR_NEW_ARRAY */ +    };     +    /* +    Instances of classes derived from "gc" will be allocated in the  +    collected heap by default, unless an explicit NoGC placement is +    specified. */ + +class gc_cleanup: virtual public gc {public: +    inline gc_cleanup(); +    inline virtual ~gc_cleanup(); +private: +    inline static void _cdecl cleanup( void* obj, void* clientData );}; +    /* +    Instances of classes derived from "gc_cleanup" will be allocated +    in the collected heap by default.  When the collector discovers an +    inaccessible object derived from "gc_cleanup" or containing a +    member derived from "gc_cleanup", its destructors will be +    invoked. */ + +extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );} + +inline void* operator new(  +    size_t size,  +    GCPlacement gcp, +    GCCleanUpFunc cleanup = 0, +    void* clientData = 0 ); +    /* +    Allocates a collectable or uncollected object, according to the +    value of "gcp". + +    For collectable objects, if "cleanup" is non-null, then when the +    allocated object "obj" becomes inaccessible, the collector will +    invoke the function "cleanup( obj, clientData )" but will not +    invoke the object's destructors.  It is an error to explicitly +    delete an object allocated with a non-null "cleanup". + +    It is an error to specify a non-null "cleanup" with NoGC or for +    classes derived from "gc_cleanup" or containing members derived +    from "gc_cleanup". */ + +#ifdef OPERATOR_NEW_ARRAY + +inline void* operator new[]( +    size_t size,  +    GCPlacement gcp, +    GCCleanUpFunc cleanup = 0, +    void* clientData = 0 ); +    /* +    The operator new for arrays, identical to the above. 
*/ + +#endif /* OPERATOR_NEW_ARRAY */ + +/**************************************************************************** + +Inline implementation + +****************************************************************************/ + +inline void* gc::operator new( size_t size ) { +    return GC_MALLOC( size );} +     +inline void* gc::operator new( size_t size, GCPlacement gcp ) { +    if (gcp == GC)  +        return GC_MALLOC( size ); +    else if (gcp == PointerFreeGC) +	return GC_MALLOC_ATOMIC( size ); +    else +        return GC_MALLOC_UNCOLLECTABLE( size );} + +inline void gc::operator delete( void* obj ) { +    GC_FREE( obj );} +     + +#ifdef OPERATOR_NEW_ARRAY + +inline void* gc::operator new[]( size_t size ) { +    return gc::operator new( size );} +     +inline void* gc::operator new[]( size_t size, GCPlacement gcp ) { +    return gc::operator new( size, gcp );} + +inline void gc::operator delete[]( void* obj ) { +    gc::operator delete( obj );} +     +#endif /* OPERATOR_NEW_ARRAY */ + + +inline gc_cleanup::~gc_cleanup() { +    GC_REGISTER_FINALIZER_IGNORE_SELF( GC_base(this), 0, 0, 0, 0 );} + +inline void gc_cleanup::cleanup( void* obj, void* displ ) { +    ((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();} + +inline gc_cleanup::gc_cleanup() { +    GC_finalization_proc oldProc; +    void* oldData; +    void* base = GC_base( (void *) this ); +    if (0 == base) return; +    GC_REGISTER_FINALIZER_IGNORE_SELF(  +        base, cleanup, (void*) ((char*) this - (char*) base),  +        &oldProc, &oldData ); +    if (0 != oldProc) { +        GC_REGISTER_FINALIZER_IGNORE_SELF( base, oldProc, oldData, 0, 0 );}} + +inline void* operator new(  +    size_t size,  +    GCPlacement gcp, +    GCCleanUpFunc cleanup, +    void* clientData ) +{ +    void* obj; + +    if (gcp == GC) { +        obj = GC_MALLOC( size ); +        if (cleanup != 0)  +            GC_REGISTER_FINALIZER_IGNORE_SELF(  +                obj, cleanup, clientData, 0, 0 );} +    else if (gcp == PointerFreeGC) { +        obj = GC_MALLOC_ATOMIC( size );} +    else { +        obj = GC_MALLOC_UNCOLLECTABLE( size );}; +    return obj;} +         + +#ifdef OPERATOR_NEW_ARRAY + +inline void* operator new[](  +    size_t size,  +    GCPlacement gcp, +    GCCleanUpFunc cleanup, +    void* clientData ) +{ +    return ::operator new( size, gcp, cleanup, clientData );} + +#endif /* OPERATOR_NEW_ARRAY */ + + +#endif /* GC_CPP_H */ + diff --git a/gc/include/gc_inl.h b/gc/include/gc_inl.h new file mode 100644 index 0000000..700843b --- /dev/null +++ b/gc/include/gc_inl.h @@ -0,0 +1,103 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ +/* Boehm, October 3, 1995 2:07 pm PDT */ +  +# ifndef GC_PRIVATE_H +#   include "private/gc_priv.h" +# endif + +/* USE OF THIS FILE IS NOT RECOMMENDED unless the collector has been	*/ +/* compiled without -DALL_INTERIOR_POINTERS or with			*/ +/* -DDONT_ADD_BYTE_AT_END, or the specified size includes a pointerfree	*/ +/* word at the end.  
In the standard collector configuration,		*/ +/* the final word of each object may not be scanned.			*/ +/* This is most useful for compilers that generate C.			*/ +/* Manual use is hereby discouraged.					*/ + +/* Allocate n words (NOT BYTES).  X is made to point to the result.	*/ +/* It is assumed that n < MAXOBJSZ, and					*/ +/* that n > 0.  On machines requiring double word alignment of some	*/ +/* data, we also assume that n is 1 or even.  This bypasses the		*/ +/* MERGE_SIZES mechanism.  In order to minimize the number of distinct	*/ +/* free lists that are maintained, the caller should ensure that a 	*/ +/* small number of distinct values of n are used.  (The MERGE_SIZES	*/ +/* mechanism normally does this by ensuring that only the leading three	*/ +/* bits of n may be nonzero.  See misc.c for details.)  We really 	*/ +/* recommend this only in cases in which n is a constant, and no	*/ +/* locking is required.							*/ +/* In that case it may allow the compiler to perform substantial	*/ +/* additional optimizations.						*/ +# define GC_MALLOC_WORDS(result,n) \ +{	\ +    register ptr_t op;	\ +    register ptr_t *opp;	\ +    DCL_LOCK_STATE;	\ +	\ +    opp = &(GC_objfreelist[n]);	\ +    FASTLOCK();	\ +    if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {	\ +        FASTUNLOCK();	\ +        (result) = GC_generic_malloc_words_small((n), NORMAL);	\ +    } else { 	\ +        *opp = obj_link(op);	\ +        obj_link(op) = 0;	\ +        GC_words_allocd += (n);	\ +        FASTUNLOCK();	\ +        (result) = (GC_PTR) op;	\ +    }	\ +} + + +/* The same for atomic objects:	*/ +# define GC_MALLOC_ATOMIC_WORDS(result,n) \ +{	\ +    register ptr_t op;	\ +    register ptr_t *opp;	\ +    DCL_LOCK_STATE;	\ +	\ +    opp = &(GC_aobjfreelist[n]);	\ +    FASTLOCK();	\ +    if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {	\ +        FASTUNLOCK();	\ +        (result) = GC_generic_malloc_words_small((n), PTRFREE);	\ +    } else { 	\ +        *opp = obj_link(op);	\ +        obj_link(op) = 0;	\ +        GC_words_allocd += (n);	\ +        FASTUNLOCK();	\ +        (result) = (GC_PTR) op;	\ +    }	\ +} + +/* And once more for two word initialized objects: */ +# define GC_CONS(result, first, second) \ +{	\ +    register ptr_t op;	\ +    register ptr_t *opp;	\ +    DCL_LOCK_STATE;	\ +	\ +    opp = &(GC_objfreelist[2]);	\ +    FASTLOCK();	\ +    if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {	\ +        FASTUNLOCK();	\ +        op = GC_generic_malloc_words_small(2, NORMAL);	\ +    } else {	\ +        *opp = obj_link(op);	\ +        GC_words_allocd += 2;	\ +        FASTUNLOCK();	\ +    } \ +    ((word *)op)[0] = (word)(first);	\ +    ((word *)op)[1] = (word)(second);	\ +    (result) = (GC_PTR) op;	\ +} diff --git a/gc/include/gc_inline.h b/gc/include/gc_inline.h new file mode 100644 index 0000000..db62d1d --- /dev/null +++ b/gc/include/gc_inline.h @@ -0,0 +1 @@ +# include "gc_inl.h" diff --git a/gc/include/gc_typed.h b/gc/include/gc_typed.h new file mode 100644 index 0000000..e4a6b94 --- /dev/null +++ b/gc/include/gc_typed.h @@ -0,0 +1,91 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved. + * Copyright 1996 Silicon Graphics.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. 
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/*
+ * Some simple primitives for allocation with explicit type information.
+ * Facilities for dynamic type inference may be added later.
+ * Should be used only for extremely performance critical applications,
+ * or if conservative collector leakage is otherwise a problem (unlikely).
+ * Note that this is implemented completely separately from the rest
+ * of the collector, and is not linked in unless referenced.
+ * This does not currently support GC_DEBUG in any interesting way.
+ */
+/* Boehm, May 19, 1994 2:13 pm PDT */
+
+#ifndef _GC_TYPED_H
+# define _GC_TYPED_H
+# ifndef _GC_H
+#   include "gc.h"
+# endif
+
+typedef GC_word * GC_bitmap;
+	/* The least significant bit of the first word is one if	*/
+	/* the first word in the object may be a pointer.		*/
+	
+# define GC_get_bit(bm, index) \
+		(((bm)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define GC_set_bit(bm, index) \
+		(bm)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+
+typedef GC_word GC_descr;
+
+GC_API GC_descr GC_make_descriptor GC_PROTO((GC_bitmap bm, size_t len));
+		/* Return a type descriptor for the object whose layout	*/
+		/* is described by the argument.			*/
+		/* The least significant bit of the first word is one	*/
+		/* if the first word in the object may be a pointer.	*/
+		/* The second argument specifies the number of		*/
+		/* meaningful bits in the bitmap.  The actual object 	*/
+		/* may be larger (but not smaller).  Any additional	*/
+		/* words in the object are assumed not to contain 	*/
+		/* pointers.						*/
+		/* Returns a conservative approximation in the		*/
+		/* (unlikely) case of insufficient memory to build	*/
+		/* the descriptor.  Calls to GC_make_descriptor		*/
+		/* may consume some amount of a finite resource.  This	*/
+		/* is intended to be called once per type, not once	*/
+		/* per allocation.					*/
+
+GC_API GC_PTR GC_malloc_explicitly_typed
+			GC_PROTO((size_t size_in_bytes, GC_descr d));
+		/* Allocate an object whose layout is described by d.	*/
+		/* The resulting object MAY NOT BE PASSED TO REALLOC.	*/
+
+GC_API GC_PTR GC_malloc_explicitly_typed_ignore_off_page
+                        GC_PROTO((size_t size_in_bytes, GC_descr d));
+		
+GC_API GC_PTR GC_calloc_explicitly_typed
+			GC_PROTO((size_t nelements,
+  				  size_t element_size_in_bytes,
+  				  GC_descr d));
+  	/* Allocate an array of nelements elements, each of the	*/
+  	/* given size, and with the given descriptor.		*/
+  	/* The element size must be a multiple of the byte	*/
+  	/* alignment required for pointers.  E.g. on a 32-bit	*/
+  	/* machine with 16-bit aligned pointers, size_in_bytes	*/
+  	/* must be a multiple of 2.				
*/
+
+#ifdef GC_DEBUG
+#   define GC_MALLOC_EXPLICTLY_TYPED(bytes, d) GC_MALLOC(bytes)
+#   define GC_CALLOC_EXPLICTLY_TYPED(n, bytes, d) GC_MALLOC(n*bytes)
+#else
+#  define GC_MALLOC_EXPLICTLY_TYPED(bytes, d) \
+	GC_malloc_explicitly_typed(bytes, d)
+#  define GC_CALLOC_EXPLICTLY_TYPED(n, bytes, d) \
+	GC_calloc_explicitly_typed(n, bytes, d)
+#endif /* !GC_DEBUG */
+
+
+#endif /* _GC_TYPED_H */
+
diff --git a/gc/include/javaxfc.h b/gc/include/javaxfc.h
new file mode 100644
index 0000000..880020c
--- /dev/null
+++ b/gc/include/javaxfc.h
@@ -0,0 +1,41 @@
+# ifndef GC_H
+#   include "gc.h"
+# endif
+
+/*
+ * Invoke all remaining finalizers that haven't yet been run.
+ * This is needed for strict compliance with the Java standard, 
+ * which requires that the runtime be able to guarantee that all
+ * finalizers are run.
+ * This is problematic for several reasons:
+ * 1) It means that finalizers, and all methods called by them,
+ *    must be prepared to deal with objects that have been finalized in
+ *    spite of the fact that they are still referenced by statically
+ *    allocated pointer variables.
+ * 2) It may mean that we get stuck in an infinite loop running
+ *    finalizers which create new finalizable objects, though that's
+ *    probably unlikely.
+ * Thus this is not recommended for general use.
+ */
+void GC_finalize_all();
+
+/*
+ * A version of GC_register_finalizer that allows the object to be
+ * finalized before the objects it references.  This is again error
+ * prone, in that it makes it easy to accidentally reference finalized
+ * objects.  Again, recommended only for JVM implementors.
+ */
+void GC_register_finalizer_no_order(GC_PTR obj,
+			       GC_finalization_proc fn, GC_PTR cd,
+			       GC_finalization_proc *ofn, GC_PTR * ocd);
+
+void GC_debug_register_finalizer_no_order(GC_PTR obj,
+			       GC_finalization_proc fn, GC_PTR cd,
+			       GC_finalization_proc *ofn, GC_PTR * ocd);
+
+#ifdef GC_DEBUG
+#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+	GC_debug_register_finalizer_no_order(p, f, d, of, od)
+#else
+#   define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+	GC_register_finalizer_no_order(p, f, d, of, od)
+#endif
diff --git a/gc/include/leak_detector.h b/gc/include/leak_detector.h
new file mode 100644
index 0000000..6786825
--- /dev/null
+++ b/gc/include/leak_detector.h
@@ -0,0 +1,7 @@
+#define GC_DEBUG
+#include "gc.h"
+#define malloc(n) GC_MALLOC(n)
+#define calloc(m,n) GC_MALLOC(m*n)
+#define free(p) GC_FREE(p)
+#define realloc(p,n) GC_REALLOC(p,n)
+#define CHECK_LEAKS() GC_gcollect()
diff --git a/gc/include/new_gc_alloc.h b/gc/include/new_gc_alloc.h
new file mode 100644
index 0000000..5771388
--- /dev/null
+++ b/gc/include/new_gc_alloc.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+//
+// This is a revision of gc_alloc.h for SGI STL versions > 3.0.
+// Unlike earlier versions, it supplements the standard "alloc.h"
+// instead of replacing it.
+//
+// This is sloppy about variable names used in header files.
+// It also doesn't yet understand the new header file names or
+// namespaces.
+//
+// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
+// and -DALL_INTERIOR_POINTERS.  We also recommend
+// -DREDIRECT_MALLOC=GC_uncollectable_malloc.
+//
+// Some of this could be faster in the explicit deallocation case.
+// In particular, we spend too much time clearing objects on the
+// free lists.  That could be avoided.
+//
+// This uses template classes with static members, and hence does not work
+// with g++ 2.7.2 and earlier.
+//
+// Unlike its predecessor, this one simply defines
+// 	gc_alloc
+//	single_client_gc_alloc
+//	traceable_alloc
+//	single_client_traceable_alloc
+//
+// It does not redefine alloc.  Nor does it change the default allocator,
+// though the user may wish to do so.  (The argument against changing
+// the default allocator is that it may introduce subtle link compatibility
+// problems.  The argument for changing it is that the usual default
+// allocator is a very bad choice for a garbage collected environment.)
+//
+
+#ifndef GC_ALLOC_H
+
+#include "gc.h"
+#include <alloc.h>
+
+#define GC_ALLOC_H
+
+#include <stddef.h>
+#include <string.h>
+
+// The following need to match collector data structures.
+// We can't include gc_priv.h, since that pulls in way too much stuff.
+// This should eventually be factored out into another include file.
+
+extern "C" {
+    extern void ** const GC_objfreelist_ptr;
+    extern void ** const GC_aobjfreelist_ptr;
+    extern void ** const GC_uobjfreelist_ptr;
+    extern void ** const GC_auobjfreelist_ptr;
+
+    extern void GC_incr_words_allocd(size_t words);
+    extern void GC_incr_mem_freed(size_t words);
+
+    extern char * GC_generic_malloc_words_small(size_t word, int kind);
+}
+
+// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
+// AUNCOLLECTABLE in gc_priv.h.
+
+enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
+       GC_AUNCOLLECTABLE = 3 };
+
+enum { GC_max_fast_bytes = 255 };
+
+enum { GC_bytes_per_word = sizeof(char *) };
+
+enum { GC_byte_alignment = 8 };
+
+enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
+
+inline void * &GC_obj_link(void * p)
+{   return *(void **)p;  }
+
+// Compute a number of words >= n+1 bytes.
+// The +1 allows for pointers one past the end.
+inline size_t GC_round_up(size_t n)
+{
+    return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
+}
+
+// The same but don't allow for extra byte.
+inline size_t GC_round_up_uncollectable(size_t n)
+{
+    return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
+}
+
+template <int dummy>
+class GC_aux_template {
+public:
+  // File local count of allocated words.  Occasionally this is
+  // added into the global count.  A separate count is necessary since the
+  // real one must be updated with a procedure call.
+  static size_t GC_words_recently_allocd;
+
+  // Same for uncollectable memory.  Not yet reflected in either
+  // GC_words_recently_allocd or GC_non_gc_bytes.
+  static size_t GC_uncollectable_words_recently_allocd;
+
+  // Similar counter for explicitly deallocated memory.
+  static size_t GC_mem_recently_freed;
+
+  // Again for uncollectable memory.
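+  // (All four counters are folded into the collector's global totals
+  // by GC_out_of_line_malloc below.)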
+  static size_t GC_uncollectable_mem_recently_freed; + +  static void * GC_out_of_line_malloc(size_t nwords, int kind); +}; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0; + +template <int dummy> +size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0; + +template <int dummy> +void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind) +{ +    GC_words_recently_allocd += GC_uncollectable_words_recently_allocd; +    GC_non_gc_bytes += +                GC_bytes_per_word * GC_uncollectable_words_recently_allocd; +    GC_uncollectable_words_recently_allocd = 0; + +    GC_mem_recently_freed += GC_uncollectable_mem_recently_freed; +    GC_non_gc_bytes -=  +                GC_bytes_per_word * GC_uncollectable_mem_recently_freed; +    GC_uncollectable_mem_recently_freed = 0; + +    GC_incr_words_allocd(GC_words_recently_allocd); +    GC_words_recently_allocd = 0; + +    GC_incr_mem_freed(GC_mem_recently_freed); +    GC_mem_recently_freed = 0; + +    return GC_generic_malloc_words_small(nwords, kind); +} + +typedef GC_aux_template<0> GC_aux; + +// A fast, single-threaded, garbage-collected allocator +// We assume the first word will be immediately overwritten. +// In this version, deallocation is not a noop, and explicit +// deallocation is likely to help performance. +template <int dummy> +class single_client_gc_alloc_template { +    public: +     	static void * allocate(size_t n) +        { +	    size_t nwords = GC_round_up(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc(n); +	    flh = GC_objfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_words_recently_allocd += nwords; +	    return op; +        } +     	static void * ptr_free_allocate(size_t n) +        { +	    size_t nwords = GC_round_up(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_atomic(n); +	    flh = GC_aobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_words_recently_allocd += nwords; +	    return op; +        } +	static void deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes)  { +		GC_free(p); +	    } else { +	        flh = GC_objfreelist_ptr + nwords; +	        GC_obj_link(p) = *flh; +		memset((char *)p + GC_bytes_per_word, 0, +		       GC_bytes_per_word * (nwords - 1)); +	        *flh = p; +	        GC_aux::GC_mem_recently_freed += nwords; +	    } +	} +	static void ptr_free_deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes) { +		GC_free(p); +	    } else { +	    	flh = GC_aobjfreelist_ptr + nwords; +	    	GC_obj_link(p) = *flh; +	    	*flh = p; +	    	GC_aux::GC_mem_recently_freed += nwords; +	    } +	} +}; + +typedef single_client_gc_alloc_template<0> single_client_gc_alloc; + +// Once more, for uncollectable objects. 
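+// The next template mirrors single_client_gc_alloc, but allocates
+// uncollectable ("traceable") memory: the objects are scanned for
+// pointers but never reclaimed automatically, so they must be
+// explicitly deallocated.  A minimal sketch (sizes arbitrary):
+//
+//	void * q = single_client_traceable_alloc::allocate(sizeof(int));
+//	/* ... */
+//	single_client_traceable_alloc::deallocate(q, sizeof(int));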
+template <int dummy> +class single_client_traceable_alloc_template { +    public: +     	static void * allocate(size_t n) +        { +	    size_t nwords = GC_round_up_uncollectable(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n); +	    flh = GC_uobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_uncollectable_words_recently_allocd += nwords; +	    return op; +        } +     	static void * ptr_free_allocate(size_t n) +        { +	    size_t nwords = GC_round_up_uncollectable(n); +	    void ** flh; +	    void * op; + +  	    if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n); +	    flh = GC_auobjfreelist_ptr + nwords; +	    if (0 == (op = *flh)) { +		return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE); +	    } +	    *flh = GC_obj_link(op); +	    GC_aux::GC_uncollectable_words_recently_allocd += nwords; +	    return op; +        } +	static void deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up_uncollectable(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes)  { +		GC_free(p); +	    } else { +	        flh = GC_uobjfreelist_ptr + nwords; +	        GC_obj_link(p) = *flh; +	        *flh = p; +	        GC_aux::GC_uncollectable_mem_recently_freed += nwords; +	    } +	} +	static void ptr_free_deallocate(void *p, size_t n) +	{ +            size_t nwords = GC_round_up_uncollectable(n); +            void ** flh; +	    +	    if (n > GC_max_fast_bytes) { +		GC_free(p); +	    } else { +	    	flh = GC_auobjfreelist_ptr + nwords; +	    	GC_obj_link(p) = *flh; +	    	*flh = p; +	    	GC_aux::GC_uncollectable_mem_recently_freed += nwords; +	    } +	} +}; + +typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc; + +template < int dummy > +class gc_alloc_template { +    public: +     	static void * allocate(size_t n) { return GC_malloc(n); } +     	static void * ptr_free_allocate(size_t n) +		{ return GC_malloc_atomic(n); } +	static void deallocate(void *, size_t) { } +	static void ptr_free_deallocate(void *, size_t) { } +}; + +typedef gc_alloc_template < 0 > gc_alloc; + +template < int dummy > +class traceable_alloc_template { +    public: +     	static void * allocate(size_t n) { return GC_malloc_uncollectable(n); } +     	static void * ptr_free_allocate(size_t n) +		{ return GC_malloc_atomic_uncollectable(n); } +	static void deallocate(void *p, size_t) { GC_free(p); } +	static void ptr_free_deallocate(void *p, size_t) { GC_free(p); } +}; + +typedef traceable_alloc_template < 0 > traceable_alloc; + +#ifdef _SGI_SOURCE + +// We want to specialize simple_alloc so that it does the right thing +// for all pointerfree types.  At the moment there is no portable way to +// even approximate that.  The following approximation should work for +// SGI compilers, and perhaps some others. + +# define __GC_SPECIALIZE(T,alloc) \ +class simple_alloc<T, alloc> { \ +public: \ +    static T *allocate(size_t n) \ +	{ return 0 == n? 
0 : \ +			 (T*) alloc::ptr_free_allocate(n * sizeof (T)); } \ +    static T *allocate(void) \ +	{ return (T*) alloc::ptr_free_allocate(sizeof (T)); } \ +    static void deallocate(T *p, size_t n) \ +	{ if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \ +    static void deallocate(T *p) \ +	{ alloc::ptr_free_deallocate(p, sizeof (T)); } \ +}; + +__GC_SPECIALIZE(char, gc_alloc) +__GC_SPECIALIZE(int, gc_alloc) +__GC_SPECIALIZE(unsigned, gc_alloc) +__GC_SPECIALIZE(float, gc_alloc) +__GC_SPECIALIZE(double, gc_alloc) + +__GC_SPECIALIZE(char, traceable_alloc) +__GC_SPECIALIZE(int, traceable_alloc) +__GC_SPECIALIZE(unsigned, traceable_alloc) +__GC_SPECIALIZE(float, traceable_alloc) +__GC_SPECIALIZE(double, traceable_alloc) + +__GC_SPECIALIZE(char, single_client_gc_alloc) +__GC_SPECIALIZE(int, single_client_gc_alloc) +__GC_SPECIALIZE(unsigned, single_client_gc_alloc) +__GC_SPECIALIZE(float, single_client_gc_alloc) +__GC_SPECIALIZE(double, single_client_gc_alloc) + +__GC_SPECIALIZE(char, single_client_traceable_alloc) +__GC_SPECIALIZE(int, single_client_traceable_alloc) +__GC_SPECIALIZE(unsigned, single_client_traceable_alloc) +__GC_SPECIALIZE(float, single_client_traceable_alloc) +__GC_SPECIALIZE(double, single_client_traceable_alloc) + +#ifdef __STL_USE_STD_ALLOCATORS + +__STL_BEGIN_NAMESPACE + +template <class _T> +struct _Alloc_traits<_T, gc_alloc > +{ +  static const bool _S_instanceless = true; +  typedef simple_alloc<_T, gc_alloc > _Alloc_type; +  typedef __allocator<_T, gc_alloc > allocator_type; +}; + +inline bool operator==(const gc_alloc&, +                       const gc_alloc&) +{ +  return true; +} + +inline bool operator!=(const gc_alloc&, +                       const gc_alloc&) +{ +  return false; +} + +template <class _T> +struct _Alloc_traits<_T, single_client_gc_alloc > +{ +  static const bool _S_instanceless = true; +  typedef simple_alloc<_T, single_client_gc_alloc > _Alloc_type; +  typedef __allocator<_T, single_client_gc_alloc > allocator_type; +}; + +inline bool operator==(const single_client_gc_alloc&, +                       const single_client_gc_alloc&) +{ +  return true; +} + +inline bool operator!=(const single_client_gc_alloc&, +                       const single_client_gc_alloc&) +{ +  return false; +} + +template <class _T> +struct _Alloc_traits<_T, traceable_alloc > +{ +  static const bool _S_instanceless = true; +  typedef simple_alloc<_T, traceable_alloc > _Alloc_type; +  typedef __allocator<_T, traceable_alloc > allocator_type; +}; + +inline bool operator==(const traceable_alloc&, +                       const traceable_alloc&) +{ +  return true; +} + +inline bool operator!=(const traceable_alloc&, +                       const traceable_alloc&) +{ +  return false; +} + +template <class _T> +struct _Alloc_traits<_T, single_client_traceable_alloc > +{ +  static const bool _S_instanceless = true; +  typedef simple_alloc<_T, single_client_traceable_alloc > _Alloc_type; +  typedef __allocator<_T, single_client_traceable_alloc > allocator_type; +}; + +inline bool operator==(const single_client_traceable_alloc&, +                       const single_client_traceable_alloc&) +{ +  return true; +} + +inline bool operator!=(const single_client_traceable_alloc&, +                       const single_client_traceable_alloc&) +{ +  return false; +} + +__STL_END_NAMESPACE + +#endif /* __STL_USE_STD_ALLOCATORS */ + +#endif /* _SGI_SOURCE */ + +#endif /* GC_ALLOC_H */ diff --git a/gc/include/private/cord_pos.h b/gc/include/private/cord_pos.h new file mode 100644 
index 0000000..d2b24bb
--- /dev/null
+++ b/gc/include/private/cord_pos.h
@@ -0,0 +1,118 @@
+/* 
+ * Copyright (c) 1993-1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose,  provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:23 pm PDT */
+# ifndef CORD_POSITION_H
+
+/* The representation of CORD_position.  This is private to the	*/
+/* implementation, but the size is known to clients.  Also	*/
+/* the implementation of some exported macros relies on it.	*/
+/* Don't use anything defined here and not in cord.h.		*/
+
+# define MAX_DEPTH 48
+	/* The maximum depth of a balanced cord + 1.		*/
+	/* We don't let cords get deeper than MAX_DEPTH.	*/
+
+struct CORD_pe {
+    CORD pe_cord;
+    size_t pe_start_pos;
+};
+
+/* A structure describing an entry on the path from the root 	*/
+/* to current position.						*/
+typedef struct CORD_Pos {
+    size_t cur_pos;
+    int path_len;
+#	define CORD_POS_INVALID (0x55555555)
+		/* path_len == INVALID <==> position invalid */
+    const char *cur_leaf;	/* Current leaf, if it is a string.	*/
+    				/* If the current leaf is a function,	*/
+    				/* then this may point to function_buf	*/
+    				/* containing the next few characters.	*/
+    				/* Always points to a valid string	*/
+    				/* containing the current character 	*/
+    				/* unless cur_end is 0.			*/
+    size_t cur_start;	/* Start position of cur_leaf	*/
+    size_t cur_end;	/* Ending position of cur_leaf	*/
+    			/* 0 if cur_leaf is invalid.	*/
+    struct CORD_pe path[MAX_DEPTH + 1];
+    	/* path[path_len] is the leaf corresponding to cur_pos	*/
+    	/* path[0].pe_cord is the cord we point to.		*/
+#   define FUNCTION_BUF_SZ 8
+    char function_buf[FUNCTION_BUF_SZ];	/* Space for next few chars	*/
+    					/* from function node.		*/
+} CORD_pos[1];
+
+/* Extract the cord from a position:	*/
+CORD CORD_pos_to_cord(CORD_pos p);
+	
+/* Extract the current index from a position:	*/
+size_t CORD_pos_to_index(CORD_pos p);
+	
+/* Fetch the character located at the given position:	*/
+char CORD_pos_fetch(CORD_pos p);
+	
+/* Initialize the position to refer to the given cord and index.	*/
+/* Note that this is the most expensive function on positions:	*/
+void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+	
+/* Advance the position to the next character.	*/
+/* P must be initialized and valid.		*/
+/* Invalidates p if past end:			*/
+void CORD_next(CORD_pos p);
+
+/* Move the position to the preceding character.	*/
+/* P must be initialized and valid.			*/
+/* Invalidates p if past beginning:			*/
+void CORD_prev(CORD_pos p);
+	
+/* Is the position valid, i.e. inside the cord?		*/
+int CORD_pos_valid(CORD_pos p);
+
+char CORD__pos_fetch(CORD_pos);
+void CORD__next(CORD_pos);
+void CORD__prev(CORD_pos);
+
+#define CORD_pos_fetch(p)	\
+    (((p)[0].cur_end != 0)? \
+     	(p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
+     	: CORD__pos_fetch(p))
+
+#define CORD_next(p)	\
+    (((p)[0].cur_pos + 1 < (p)[0].cur_end)? 
\ +    	(p)[0].cur_pos++ \ +    	: (CORD__next(p), 0)) + +#define CORD_prev(p)	\ +    (((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \ +    	(p)[0].cur_pos-- \ +    	: (CORD__prev(p), 0)) + +#define CORD_pos_to_index(p) ((p)[0].cur_pos) + +#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord) + +#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID) + +/* Some grubby stuff for performance-critical friends:	*/ +#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos)) +	/* Number of characters in cache.  <= 0 ==> none	*/ + +#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p)) +	/* Advance position by n characters	*/ +	/* 0 < n < CORD_pos_chars_left(p)	*/ + +#define CORD_pos_cur_char_addr(p) \ +	(p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start) +	/* address of current character in cache.	*/ + +#endif diff --git a/gc/include/private/gc_hdrs.h b/gc/include/private/gc_hdrs.h new file mode 100644 index 0000000..60dc2ad --- /dev/null +++ b/gc/include/private/gc_hdrs.h @@ -0,0 +1,135 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ +/* Boehm, July 11, 1995 11:54 am PDT */ +# ifndef GC_HEADERS_H +# define GC_HEADERS_H +typedef struct hblkhdr hdr; + +# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36 +	--> Get a real machine. +# endif + +/* + * The 2 level tree data structure that is used to find block headers. + * If there are more than 32 bits in a pointer, the top level is a hash + * table. + */ + +# if CPP_WORDSZ > 32 +#   define HASH_TL +# endif + +/* Define appropriate out-degrees for each of the two tree levels	*/ +# ifdef SMALL_CONFIG +#   define LOG_BOTTOM_SZ 11 +	/* Keep top index size reasonable with smaller blocks. */ +# else +#   define LOG_BOTTOM_SZ 10 +# endif +# ifndef HASH_TL +#   define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE) +# else +#   define LOG_TOP_SZ 11 +# endif +# define TOP_SZ (1 << LOG_TOP_SZ) +# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ) + +typedef struct bi { +    hdr * index[BOTTOM_SZ]; +	/* + 	 * The bottom level index contains one of three kinds of values: +	 * 0 means we're not responsible for this block, +	 *   or this is a block other than the first one in a free block. +	 * 1 < (long)X <= MAX_JUMP means the block starts at least +	 *        X * HBLKSIZE bytes before the current address. +	 * A valid pointer points to a hdr structure. (The above can't be +	 * valid pointers due to the GET_MEM return convention.) +	 */ +    struct bi * asc_link;	/* All indices are linked in	*/ +    				/* ascending order...		*/ +    struct bi * desc_link;	/* ... and in descending order.	*/ +    word key;			/* high order address bits.	*/ +# ifdef HASH_TL +    struct bi * hash_link;	/* Hash chain link.		*/ +# endif +} bottom_index; + +/* extern bottom_index GC_all_nils; - really part of GC_arrays */ + +/* extern bottom_index * GC_top_index []; - really part of GC_arrays */ +				/* Each entry points to a bottom_index.	
*/ +				/* On a 32 bit machine, it points to 	*/ +				/* the index for a set of high order	*/ +				/* bits equal to the index.  For longer	*/ +				/* addresses, we hash the high order	*/ +				/* bits to compute the index in 	*/ +				/* GC_top_index, and each entry points	*/ +				/* to a hash chain.			*/ +				/* The last entry in each chain is	*/ +				/* GC_all_nils.				*/ + + +# define MAX_JUMP (HBLKSIZE - 1) + +# define HDR_FROM_BI(bi, p) \ +		((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)]) +# ifndef HASH_TL +#   define BI(p) (GC_top_index \ +		[(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)]) +#   define HDR_INNER(p) HDR_FROM_BI(BI(p),p) +#   ifdef SMALL_CONFIG +#	define HDR(p) GC_find_header((ptr_t)(p)) +#   else +#	define HDR(p) HDR_INNER(p) +#   endif +#   define GET_BI(p, bottom_indx) (bottom_indx) = BI(p) +#   define GET_HDR(p, hhdr) (hhdr) = HDR(p) +#   define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr) +#   define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p)) +# else /* hash */ +/*  Hash function for tree top level */ +#   define TL_HASH(hi) ((hi) & (TOP_SZ - 1)) +/*  Set bottom_indx to point to the bottom index for address p */ +#   define GET_BI(p, bottom_indx) \ +	{ \ +	    register word hi = \ +	        (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \ +	    register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \ +	    \ +	    while (_bi -> key != hi && _bi != GC_all_nils) \ +	    	_bi = _bi -> hash_link; \ +	    (bottom_indx) = _bi; \ +	} +#   define GET_HDR_ADDR(p, ha) \ +	{ \ +	    register bottom_index * bi; \ +	    \ +	    GET_BI(p, bi);	\ +	    (ha) = &(HDR_FROM_BI(bi, p)); \ +	} +#   define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \ +			      (hhdr) = *_ha; } +#   define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \ +			      *_ha = (hhdr); } +#   define HDR(p) GC_find_header((ptr_t)(p)) +# endif +			     +/* Is the result a forwarding address to someplace closer to the	*/ +/* beginning of the block or NIL?					*/ +# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP) + +/* Get an HBLKSIZE aligned address closer to the beginning of the block */ +/* h.  Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr).		*/ +# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr)) +# endif /*  GC_HEADERS_H */ diff --git a/gc/include/private/gc_priv.h b/gc/include/private/gc_priv.h new file mode 100644 index 0000000..5ce52a7 --- /dev/null +++ b/gc/include/private/gc_priv.h @@ -0,0 +1,1748 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. 
+ */
+/* Boehm, February 16, 1996 2:30 pm PST */
+ 
+
+# ifndef GC_PRIVATE_H
+# define GC_PRIVATE_H
+
+#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
+    /* sony RISC NEWS, NEWSOS 4 */
+#   define BSD_TIME
+/*    typedef long ptrdiff_t;   -- necessary on some really old systems	*/
+#endif
+
+#if defined(mips) && defined(SYSTYPE_BSD43)
+    /* MIPS RISCOS 4 */
+#   define BSD_TIME
+#endif
+
+#ifdef BSD_TIME
+#   include <sys/types.h>
+#   include <sys/time.h>
+#   include <sys/resource.h>
+#endif /* BSD_TIME */
+
+# ifndef GC_H
+#   include "gc.h"
+# endif
+
+typedef GC_word word;
+typedef GC_signed_word signed_word;
+
+# ifndef CONFIG_H
+#   include "gcconfig.h"
+# endif
+
+# ifndef HEADERS_H
+#   include "gc_hdrs.h"
+# endif
+
+typedef int GC_bool;
+# define TRUE 1
+# define FALSE 0
+
+typedef char * ptr_t;	/* A generic pointer to which we can add	*/
+			/* byte displacements.				*/
+			/* Preferably identical to caddr_t, if it 	*/
+			/* exists.					*/
+			
+#if defined(__STDC__)
+#   include <stdlib.h>
+#   if !(defined( sony_news ) )
+#       include <stddef.h>
+#   endif
+#   define VOLATILE volatile
+#   define CONST const
+#else
+#   ifdef MSWIN32
+#   	include <stdlib.h>
+#   endif
+#   define VOLATILE
+#   define CONST
+#endif
+
+#if 0 /* was once defined for AMIGA */
+#   define GC_FAR __far
+#else
+#   define GC_FAR
+#endif
+
+/*********************************/
+/*                               */
+/* Definitions for conservative  */
+/* collector                     */
+/*                               */
+/*********************************/
+
+/*********************************/
+/*                               */
+/* Easily changeable parameters  */
+/*                               */
+/*********************************/
+
+#define STUBBORN_ALLOC	/* Define stubborn allocation primitives	*/
+#if defined(SRC_M3) || defined(SMALL_CONFIG)
+# undef STUBBORN_ALLOC
+#endif
+
+
+/* #define ALL_INTERIOR_POINTERS */
+		    /* Forces all pointers into the interior of an 	*/
+		    /* object to be considered valid.  Also causes the	*/
+		    /* sizes of all objects to be inflated by at least 	*/
+		    /* one byte.  This should suffice to guarantee	*/
+		    /* that in the presence of a compiler that does	*/
+		    /* not perform garbage-collector-unsafe		*/
+		    /* optimizations, all portable, strictly ANSI	*/
+		    /* conforming C programs should be safely usable	*/
+		    /* with malloc replaced by GC_malloc and free	*/
+		    /* calls removed.  There are several disadvantages: */
+		    /* 1. There are probably no interesting, portable,	*/
+		    /*    strictly ANSI	conforming C programs.		*/
+		    /* 2. This option makes it hard for the collector	*/
+		    /*    to allocate space that is not ``pointed to''  */
+		    /*    by integers, etc.  Under SunOS 4.X with a 	*/
+		    /*    statically linked libc, we empirically	*/
+		    /*    observed that it would be difficult to 	*/
+		    /*	  allocate individual objects larger than 100K.	*/
+		    /* 	  Even if only smaller objects are allocated,	*/
+		    /*    more swap space is likely to be needed.       */
+		    /*    Fortunately, much of this will never be	*/
+		    /*    touched.					*/
+		    /* If you can easily avoid using this option, do.	*/
+		    /* If not, try to keep individual objects small.	
*/ +		     +#define PRINTSTATS  /* Print garbage collection statistics          	*/ +		    /* For less verbose output, undefine in reclaim.c 	*/ + +#define PRINTTIMES  /* Print the amount of time consumed by each garbage   */ +		    /* collection.                                         */ + +#define PRINTBLOCKS /* Print object sizes associated with heap blocks,     */ +		    /* whether the objects are atomic or composite, and    */ +		    /* whether or not the block was found to be empty      */ +		    /* during the reclaim phase.  Typically generates       */ +		    /* about one screenful per garbage collection.         */ +#undef PRINTBLOCKS + +#ifdef SILENT +#  ifdef PRINTSTATS +#    undef PRINTSTATS +#  endif +#  ifdef PRINTTIMES +#    undef PRINTTIMES +#  endif +#  ifdef PRINTNBLOCKS +#    undef PRINTNBLOCKS +#  endif +#endif + +#if defined(PRINTSTATS) && !defined(GATHERSTATS) +#   define GATHERSTATS +#endif + +#ifdef FINALIZE_ON_DEMAND +#   define GC_INVOKE_FINALIZERS() +#else +#   define GC_INVOKE_FINALIZERS() (void)GC_invoke_finalizers() +#endif + +#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */ +		    /* free lists are actually maintained.  This applies  */ +		    /* only to the top level routines in misc.c, not to   */ +		    /* user generated code that calls GC_allocobj and     */ +		    /* GC_allocaobj directly.                             */ +		    /* Slows down average programs slightly.  May however */ +		    /* substantially reduce fragmentation if allocation   */ +		    /* request sizes are widely scattered.                */ +		    /* May save significant amounts of space for obj_map  */ +		    /* entries.						  */ + +#ifndef OLD_BLOCK_ALLOC +   /* Macros controlling large block allocation strategy.	*/ +#  define EXACT_FIRST  	/* Make a complete pass through the large object */ +			/* free list before splitting a block		 */ +#  define PRESERVE_LAST /* Do not divide last allocated heap segment	 */ +			/* unless we would otherwise need to expand the	 */ +			/* heap.					 */ +#endif + +/* ALIGN_DOUBLE requires MERGE_SIZES at present. */ +# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES) +#   define MERGE_SIZES +# endif + +#if defined(ALL_INTERIOR_POINTERS) && !defined(DONT_ADD_BYTE_AT_END) +# define ADD_BYTE_AT_END +#endif + + +# ifndef LARGE_CONFIG +#   define MINHINCR 16	/* Minimum heap increment, in blocks of HBLKSIZE  */ +			/* Must be multiple of largest page size.	  */ +#   define MAXHINCR 512	/* Maximum heap increment, in blocks              */ +# else +#   define MINHINCR 64 +#   define MAXHINCR 4096 +# endif + +# define TIME_LIMIT 50	   /* We try to keep pause times from exceeding	 */ +			   /* this by much. In milliseconds.		 */ + +# define BL_LIMIT GC_black_list_spacing +			   /* If we need a block of N bytes, and we have */ +			   /* a block of N + BL_LIMIT bytes available, 	 */ +			   /* and N > BL_LIMIT,				 */ +			   /* but all possible positions in it are 	 */ +			   /* blacklisted, we just use it anyway (and	 */ +			   /* print a warning, if warnings are enabled). */ +			   /* This risks subsequently leaking the block	 */ +			   /* due to a false reference.  But not using	 */ +			   /* the block risks unreasonable immediate	 */ +			   /* heap growth.				 
*/
+
+/*********************************/
+/*                               */
+/* Stack saving for debugging	 */
+/*                               */
+/*********************************/
+
+#ifdef SAVE_CALL_CHAIN
+
+/*
+ * Number of frames and arguments to save in objects allocated by
+ * debugging allocator.
+ */
+#   define NFRAMES 6	/* Number of frames to save.  Even, for		*/
+			/* alignment reasons.				*/
+#   define NARGS 2	/* Number of arguments to save for each call.	*/
+
+#   define NEED_CALLINFO
+
+/* Fill in the pc and argument information for up to NFRAMES of my	*/
+/* callers.  Ignore my frame and my caller's frame.			*/
+void GC_save_callers (/* struct callinfo info[NFRAMES] */);
+
+void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+
+#else
+
+# ifdef GC_ADD_CALLER
+#   define NFRAMES 1
+#   define NARGS 0
+#   define NEED_CALLINFO
+# endif
+
+#endif
+
+#ifdef NEED_CALLINFO
+    struct callinfo {
+	word ci_pc;
+#	if NARGS > 0
+	    word ci_arg[NARGS];	/* bit-wise complement to avoid retention */
+#	endif
+#	if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
+	    /* Likely alignment problem. */
+	    word ci_dummy;
+#	endif
+    };
+#endif
+
+
+/*********************************/
+/*                               */
+/* OS interface routines	 */
+/*                               */
+/*********************************/
+
+#ifdef BSD_TIME
+#   undef CLOCK_TYPE
+#   undef GET_TIME
+#   undef MS_TIME_DIFF
+#   define CLOCK_TYPE struct timeval
+#   define GET_TIME(x) { struct rusage rusage; \
+			 getrusage (RUSAGE_SELF,  &rusage); \
+			 x = rusage.ru_utime; }
+#   define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+                               + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+#else /* !BSD_TIME */
+#   include <time.h>
+#   if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+      clock_t clock();	/* Not in time.h, where it belongs	*/
+#   endif
+#   if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
+#     include <machine/limits.h>
+#     define CLOCKS_PER_SEC CLK_TCK
+#   endif
+#   if !defined(CLOCKS_PER_SEC)
+#     define CLOCKS_PER_SEC 1000000
+/*
+ * This is technically a bug in the implementation.  ANSI requires that
+ * CLOCKS_PER_SEC be defined.  But at least under SunOS4.1.1, it isn't.
+ * Also note that the combination of ANSI C and POSIX is incredibly gross
+ * here.  The type clock_t is used by both clock() and times().  But on
+ * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
+ * seems to apply only to clock().  Hence we use it here.  On many machines,
+ * including SunOS, clock actually uses units of microseconds (which are
+ * not really clock ticks).
+ */
+#   endif
+#   define CLOCK_TYPE clock_t
+#   define GET_TIME(x) x = clock()
+#   define MS_TIME_DIFF(a,b) ((unsigned long) \
+		(1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+#endif /* !BSD_TIME */
+
+/* We use bzero and bcopy internally.  They may not be available.	
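   (An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: timing a full	*/
+  /* collection with the clock macros defined above.		*/
+  {
+      CLOCK_TYPE start_time, done_time;
+      GET_TIME(start_time);
+      GC_gcollect();
+      GET_TIME(done_time);
+      GC_printf1("Collection took %lu msecs\n",
+		 (unsigned long)MS_TIME_DIFF(done_time, start_time));
+  }
+#endif
+/* (End of sketch.)	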
*/
+# if defined(SPARC) && defined(SUNOS4)
+#   define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(AMIGA)
+#   define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(NEXT)
+#   define BCOPY_EXISTS
+# endif
+# if defined(VAX)
+#   define BCOPY_EXISTS
+# endif
+# if defined(AMIGA)
+#   include <string.h>
+#   define BCOPY_EXISTS
+# endif
+
+# ifndef BCOPY_EXISTS
+#   include <string.h>
+#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
+#   define BZERO(x,n)  memset(x, 0, (size_t)(n))
+# else
+#   define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
+#   define BZERO(x,n) bzero((char *)(x),(int)(n))
+# endif
+
+/* HBLKSIZE aligned allocation.  0 is taken to mean failure; 	*/
+/* space is assumed to be cleared.				*/
+/* In the case of USE_MMAP, the argument must also be a 	*/
+/* physical page size.						*/
+/* GET_MEM is currently not assumed to retrieve 0 filled space, */
+/* though we should perhaps take advantage of the case in which */
+/* it does.							*/
+# ifdef PCR
+    char * real_malloc();
+#   define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
+				  + GC_page_size-1)
+# else
+#   ifdef OS2
+      void * os2_alloc(size_t bytes);
+#     define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
+				    + GC_page_size) \
+                                    + GC_page_size-1)
+#   else
+#     if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
+#       define GET_MEM(bytes) HBLKPTR((size_t) \
+				      calloc(1, (size_t)bytes + GC_page_size) \
+                                      + GC_page_size-1)
+#     else
+#	ifdef MSWIN32
+          extern ptr_t GC_win32_get_mem();
+#         define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
+#	else
+#	  ifdef MACOS
+#	    if defined(USE_TEMPORARY_MEMORY)
+		extern Ptr GC_MacTemporaryNewPtr(size_t size,
+						 Boolean clearMemory);
+#               define GET_MEM(bytes) HBLKPTR( \
+		    GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
+		    + GC_page_size-1)
+#	    else
+#         	    define GET_MEM(bytes) HBLKPTR( \
+			NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
+#	    endif
+#	  else
+              extern ptr_t GC_unix_get_mem();
+#             define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+#	  endif
+#	endif
+#     endif
+#   endif
+# endif
+
+/*
+ * Mutual exclusion between allocator/collector routines.
+ * Needed if there is more than one allocator thread.
+ * FASTLOCK() is assumed to try to acquire the lock in a cheap and
+ * dirty way that is acceptable for a few instructions, e.g. by
+ * inhibiting preemption.  This is assumed to have succeeded only
+ * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
+ * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
+ * If signals cannot be tolerated with the FASTLOCK held, then
+ * FASTLOCK should disable signals.  The code executed under
+ * FASTLOCK is otherwise immune to interruption, provided it is
+ * not restarted.
+ * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
+ * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
+ * (There is currently no equivalent for FASTLOCK.)
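+ * (An illustrative sketch of this protocol follows.)		*/
+#if 0
+  /* Editor's sketch, not part of this header: how a client of	*/
+  /* FASTLOCK would follow the rules described above.		*/
+  {
+      DCL_LOCK_STATE;
+      FASTLOCK();
+      if (FASTLOCK_SUCCEEDED()) {
+          /* ... a few instructions on collector state ... */
+          FASTUNLOCK();
+      } else {
+          FASTUNLOCK();	/* called whether or not we succeeded	*/
+          LOCK();	/* fall back to the full lock		*/
+          /* ... */
+          UNLOCK();
+      }
+  }
+#endif
+/* (End of sketch.)	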
+ */   +# ifdef THREADS +#  ifdef PCR_OBSOLETE	/* Faster, but broken with multiple lwp's	*/ +#    include  "th/PCR_Th.h" +#    include  "th/PCR_ThCrSec.h" +     extern struct PCR_Th_MLRep GC_allocate_ml; +#    define DCL_LOCK_STATE  PCR_sigset_t GC_old_sig_mask +#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)  +#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml) +#    define FASTLOCK() PCR_ThCrSec_EnterSys() +     /* Here we cheat (a lot): */ +#        define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0) +		/* TRUE if nobody currently holds the lock */ +#    define FASTUNLOCK() PCR_ThCrSec_ExitSys() +#  endif +#  ifdef PCR +#    include <base/PCR_Base.h> +#    include <th/PCR_Th.h> +     extern PCR_Th_ML GC_allocate_ml; +#    define DCL_LOCK_STATE \ +	 PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask +#    define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml) +#    define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml) +#    define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml)) +#    define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay) +#    define FASTUNLOCK()  {\ +        if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); } +#  endif +#  ifdef SRC_M3 +     extern word RT0u__inCritical; +#    define LOCK() RT0u__inCritical++ +#    define UNLOCK() RT0u__inCritical-- +#  endif +#  ifdef SOLARIS_THREADS +#    include <thread.h> +#    include <signal.h> +     extern mutex_t GC_allocate_ml; +#    define LOCK() mutex_lock(&GC_allocate_ml); +#    define UNLOCK() mutex_unlock(&GC_allocate_ml); +#  endif +#  ifdef LINUX_THREADS +#    include <pthread.h> +#    ifdef __i386__ +       inline static int GC_test_and_set(volatile unsigned int *addr) { +	  int oldval; +	  /* Note: the "xchg" instruction does not need a "lock" prefix */ +	  __asm__ __volatile__("xchgl %0, %1" +		: "=r"(oldval), "=m"(*(addr)) +		: "0"(1), "m"(*(addr))); +	  return oldval; +       } +#    else +       -- > Need implementation of GC_test_and_set() +#    endif +#    define GC_clear(addr) (*(addr) = 0) + +     extern volatile unsigned int GC_allocate_lock; +	/* This is not a mutex because mutexes that obey the (optional)     */ +	/* POSIX scheduling rules are subject to convoys in high contention */ +	/* applications.  This is basically a spin lock.		    */ +     extern pthread_t GC_lock_holder; +     extern void GC_lock(void); +	/* Allocation lock holder.  Only set if acquired by client through */ +	/* GC_call_with_alloc_lock.					   
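	(An illustrative sketch follows.)			   */
+#if 0
+  /* Editor's sketch, not part of this header: the usual	*/
+  /* critical-section idiom inside the collector.  On this	*/
+  /* port LOCK() spins on GC_test_and_set and calls GC_lock()	*/
+  /* on contention.						*/
+  {
+      DCL_LOCK_STATE;
+      DISABLE_SIGNALS();
+      LOCK();
+      /* ... update allocator/collector data structures ... */
+      UNLOCK();
+      ENABLE_SIGNALS();
+  }
+#endif
+/* (End of sketch.)	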
*/ +#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self() +#    define NO_THREAD (pthread_t)(-1) +#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD +#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self())) +#    ifdef UNDEFINED +#    	define LOCK() pthread_mutex_lock(&GC_allocate_ml) +#    	define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml) +#    else +#	define LOCK() \ +		{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); } +#	define UNLOCK() \ +		GC_clear(&GC_allocate_lock) +#    endif +     extern GC_bool GC_collecting; +#    define ENTER_GC() \ +		{ \ +		    GC_collecting = 1; \ +		} +#    define EXIT_GC() GC_collecting = 0; +#  endif /* LINUX_THREADS */ +#  if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS) +#    include <pthread.h> +#    include <mutex.h> + +#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \ +	|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700 +#        define GC_test_and_set(addr, v) test_and_set(addr,v) +#    else +#	 define GC_test_and_set(addr, v) __test_and_set(addr,v) +#    endif +     extern unsigned long GC_allocate_lock; +	/* This is not a mutex because mutexes that obey the (optional) 	*/ +	/* POSIX scheduling rules are subject to convoys in high contention	*/ +	/* applications.  This is basically a spin lock.			*/ +     extern pthread_t GC_lock_holder; +     extern void GC_lock(void); +	/* Allocation lock holder.  Only set if acquired by client through */ +	/* GC_call_with_alloc_lock.					   */ +#    define SET_LOCK_HOLDER() GC_lock_holder = pthread_self() +#    define NO_THREAD (pthread_t)(-1) +#    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD +#    define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self())) +#    ifdef UNDEFINED +#    	define LOCK() pthread_mutex_lock(&GC_allocate_ml) +#    	define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml) +#    else +#	define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); } +#       if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \ +	   && defined(_COMPILER_VERSION) && _COMPILER_VERSION >= 700 +#	    define UNLOCK() __lock_release(&GC_allocate_lock) +#	else +	    /* The function call in the following should prevent the	*/ +	    /* compiler from moving assignments to below the UNLOCK.	*/ +	    /* This is probably not necessary for ucode or gcc 2.8.	*/ +	    /* It may be necessary for Ragnarok and future gcc		*/ +	    /* versions.						*/ +#           define UNLOCK() { GC_noop1(&GC_allocate_lock); \ +			*(volatile unsigned long *)(&GC_allocate_lock) = 0; } +#	endif +#    endif +     extern GC_bool GC_collecting; +#    define ENTER_GC() \ +		{ \ +		    GC_collecting = 1; \ +		} +#    define EXIT_GC() GC_collecting = 0; +#  endif /* IRIX_THREADS || IRIX_JDK_THREADS */ +#  ifdef WIN32_THREADS +#    include <windows.h> +     GC_API CRITICAL_SECTION GC_allocate_ml; +#    define LOCK() EnterCriticalSection(&GC_allocate_ml); +#    define UNLOCK() LeaveCriticalSection(&GC_allocate_ml); +#  endif +#  ifndef SET_LOCK_HOLDER +#      define SET_LOCK_HOLDER() +#      define UNSET_LOCK_HOLDER() +#      define I_HOLD_LOCK() FALSE +		/* Used on platforms were locks can be reacquired,	*/ +		/* so it doesn't matter if we lie.			*/ +#  endif +# else +#    define LOCK() +#    define UNLOCK() +# endif +# ifndef SET_LOCK_HOLDER +#   define SET_LOCK_HOLDER() +#   define UNSET_LOCK_HOLDER() +#   define I_HOLD_LOCK() FALSE +		/* Used on platforms were locks can be reacquired,	*/ +		/* so it doesn't matter if we lie.			
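		(An illustrative sketch follows.)		*/
+#if 0
+  /* Editor's sketch, not part of this header: ports that	*/
+  /* track the holder let internal code check lock ownership;	*/
+  /* here I_HOLD_LOCK() is simply FALSE, so such checks must	*/
+  /* not be relied upon.					*/
+  if (I_HOLD_LOCK()) {
+      /* we know we already own the allocation lock */
+  }
+#endif
+/* (End of sketch.)	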
*/ +# endif +# ifndef ENTER_GC +#   define ENTER_GC() +#   define EXIT_GC() +# endif + +# ifndef DCL_LOCK_STATE +#   define DCL_LOCK_STATE +# endif +# ifndef FASTLOCK +#   define FASTLOCK() LOCK() +#   define FASTLOCK_SUCCEEDED() TRUE +#   define FASTUNLOCK() UNLOCK() +# endif + +/* Delay any interrupts or signals that may abort this thread.  Data	*/ +/* structures are in a consistent state outside this pair of calls.	*/ +/* ANSI C allows both to be empty (though the standard isn't very	*/ +/* clear on that point).  Standard malloc implementations are usually	*/ +/* neither interruptable nor thread-safe, and thus correspond to	*/ +/* empty definitions.							*/ +# ifdef PCR +#   define DISABLE_SIGNALS() \ +		 PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask) +#   define ENABLE_SIGNALS() \ +		PCR_Th_SetSigMask(&GC_old_sig_mask, NIL) +# else +#   if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \ +	|| defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \ +	|| defined(NO_SIGNALS) || defined(IRIX_THREADS) \ +	|| defined(IRIX_JDK_THREADS) || defined(LINUX_THREADS)  +			/* Also useful for debugging.		*/ +	/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */ +#     define DISABLE_SIGNALS() +#     define ENABLE_SIGNALS() +#   else +#     define DISABLE_SIGNALS() GC_disable_signals() +	void GC_disable_signals(); +#     define ENABLE_SIGNALS() GC_enable_signals() +	void GC_enable_signals(); +#   endif +# endif + +/* + * Stop and restart mutator threads. + */ +# ifdef PCR +#     include "th/PCR_ThCtl.h" +#     define STOP_WORLD() \ + 	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \ + 				   PCR_allSigsBlocked, \ + 				   PCR_waitForever) +#     define START_WORLD() \ +	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \ + 				   PCR_allSigsBlocked, \ + 				   PCR_waitForever); +# else +#   if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \ +	|| defined(IRIX_THREADS) || defined(LINUX_THREADS) \ +	|| defined(IRIX_JDK_THREADS) +      void GC_stop_world(); +      void GC_start_world(); +#     define STOP_WORLD() GC_stop_world() +#     define START_WORLD() GC_start_world() +#   else +#     define STOP_WORLD() +#     define START_WORLD() +#   endif +# endif + +/* Abandon ship */ +# ifdef PCR +#   define ABORT(s) PCR_Base_Panic(s) +# else +#   ifdef SMALL_CONFIG +#	define ABORT(msg) abort(); +#   else +	GC_API void GC_abort(); +#       define ABORT(msg) GC_abort(msg); +#   endif +# endif + +/* Exit abnormally, but without making a mess (e.g. out of memory) */ +# ifdef PCR +#   define EXIT() PCR_Base_Exit(1,PCR_waitForever) +# else +#   define EXIT() (void)exit(1) +# endif + +/* Print warning message, e.g. almost out of memory.	
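   (An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: WARN takes a	*/
+  /* format string plus one word-sized argument and routes	*/
+  /* them through GC_current_warn_proc, e.g.:			*/
+  WARN("Out of Memory!  Returning NIL!\n", 0);
+#endif
+/* (End of sketch.)	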
*/ +# define WARN(msg,arg) (*GC_current_warn_proc)(msg, (GC_word)(arg)) +extern GC_warn_proc GC_current_warn_proc; + +/*********************************/ +/*                               */ +/* Word-size-dependent defines   */ +/*                               */ +/*********************************/ + +#if CPP_WORDSZ == 32 +#  define WORDS_TO_BYTES(x)   ((x)<<2) +#  define BYTES_TO_WORDS(x)   ((x)>>2) +#  define LOGWL               ((word)5)    /* log[2] of CPP_WORDSZ */ +#  define modWORDSZ(n) ((n) & 0x1f)        /* n mod size of word	    */ +#  if ALIGNMENT != 4 +#	define UNALIGNED +#  endif +#endif + +#if CPP_WORDSZ == 64 +#  define WORDS_TO_BYTES(x)   ((x)<<3) +#  define BYTES_TO_WORDS(x)   ((x)>>3) +#  define LOGWL               ((word)6)    /* log[2] of CPP_WORDSZ */ +#  define modWORDSZ(n) ((n) & 0x3f)        /* n mod size of word	    */ +#  if ALIGNMENT != 8 +#	define UNALIGNED +#  endif +#endif + +#define WORDSZ ((word)CPP_WORDSZ) +#define SIGNB  ((word)1 << (WORDSZ-1)) +#define BYTES_PER_WORD      ((word)(sizeof (word))) +#define ONES                ((word)(-1)) +#define divWORDSZ(n) ((n) >> LOGWL)	   /* divide n by size of word      */ + +/*********************/ +/*                   */ +/*  Size Parameters  */ +/*                   */ +/*********************/ + +/*  heap block size, bytes. Should be power of 2 */ + +#ifndef HBLKSIZE +# ifdef SMALL_CONFIG +#   define CPP_LOG_HBLKSIZE 10 +# else +#   if CPP_WORDSZ == 32 +#     define CPP_LOG_HBLKSIZE 12 +#   else +#     define CPP_LOG_HBLKSIZE 13 +#   endif +# endif +#else +# if HBLKSIZE == 512 +#   define CPP_LOG_HBLKSIZE 9 +# endif +# if HBLKSIZE == 1024 +#   define CPP_LOG_HBLKSIZE 10 +# endif +# if HBLKSIZE == 2048 +#   define CPP_LOG_HBLKSIZE 11 +# endif +# if HBLKSIZE == 4096 +#   define CPP_LOG_HBLKSIZE 12 +# endif +# if HBLKSIZE == 8192 +#   define CPP_LOG_HBLKSIZE 13 +# endif +# if HBLKSIZE == 16384 +#   define CPP_LOG_HBLKSIZE 14 +# endif +# ifndef CPP_LOG_HBLKSIZE +    --> fix HBLKSIZE +# endif +# undef HBLKSIZE +#endif +# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE) +# define LOG_HBLKSIZE   ((word)CPP_LOG_HBLKSIZE) +# define HBLKSIZE ((word)CPP_HBLKSIZE) + + +/*  max size objects supported by freelist (larger objects may be   */ +/*  allocated, but less efficiently)                                */ + +#define CPP_MAXOBJSZ    BYTES_TO_WORDS(CPP_HBLKSIZE/2) +#define MAXOBJSZ ((word)CPP_MAXOBJSZ) +		 +# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE) + +# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q) +	/* Equivalent to subtracting 2 hblk pointers.	*/ +	/* We do it this way because a compiler should	*/ +	/* find it hard to use an integer division	*/ +	/* instead of a shift.  The bundled SunOS 4.1	*/ +	/* o.w. sometimes pessimizes the subtraction to	*/ +	/* involve a call to .div.			*/ +  +# define modHBLKSZ(n) ((n) & (HBLKSIZE-1)) +  +# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1))) + +# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1)) + +/* Round up byte allocation requests to integral number of words, etc. 
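   (An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: mapping an	*/
+  /* address to its heap block and offset.			*/
+  {
+      ptr_t p = (ptr_t)GC_malloc(32);
+      struct hblk *h = HBLKPTR(p);	/* enclosing block	*/
+      word displ = HBLKDISPL(p);	/* offset within block	*/
+      /* Here (ptr_t)h + displ == p, and			*/
+      /* modHBLKSZ((word)p) == displ.				*/
+  }
+#endif
+/* (End of sketch.)	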
*/ +# ifdef ADD_BYTE_AT_END +#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1)) +#   ifdef ALIGN_DOUBLE +#       define ALIGNED_WORDS(n) (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2)) & ~1) +#   else +#       define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n) +#   endif +#   define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ)) +#   define ADD_SLOP(bytes) ((bytes)+1) +# else +#   define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1)) +#   ifdef ALIGN_DOUBLE +#       define ALIGNED_WORDS(n) \ +			(BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1) & ~1) +#   else +#       define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n) +#   endif +#   define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ)) +#   define ADD_SLOP(bytes) (bytes) +# endif + + +/* + * Hash table representation of sets of pages.  This assumes it is + * OK to add spurious entries to sets. + * Used by black-listing code, and perhaps by dirty bit maintenance code. + */ +  +# ifdef LARGE_CONFIG +#   define LOG_PHT_ENTRIES  17 +# else +#   define LOG_PHT_ENTRIES  14	/* Collisions are likely if heap grows	*/ +				/* to more than 16K hblks = 64MB.	*/ +				/* Each hash table occupies 2K bytes.   */ +# endif +# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES) +# define PHT_SIZE (PHT_ENTRIES >> LOGWL) +typedef word page_hash_table[PHT_SIZE]; + +# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1)) + +# define get_pht_entry_from_index(bl, index) \ +		(((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1) +# define set_pht_entry_from_index(bl, index) \ +		(bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index) +# define clear_pht_entry_from_index(bl, index) \ +		(bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index)) +	 + + +/********************************************/ +/*                                          */ +/*    H e a p   B l o c k s                 */ +/*                                          */ +/********************************************/ + +/*  heap block header */ +#define HBLKMASK   (HBLKSIZE-1) + +#define BITS_PER_HBLK (HBLKSIZE * 8) + +#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ) +	   /* upper bound                                    */ +	   /* We allocate 1 bit/word.  Only the first word   */ +	   /* in each object is actually marked.             */ + +# ifdef ALIGN_DOUBLE +#   define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \ +			  / (2*CPP_WORDSZ))*2) +# else +#   define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ) +# endif +	   /* Upper bound on number of mark words per heap block  */ + +struct hblkhdr { +    word hb_sz;  /* If in use, size in words, of objects in the block. */ +		 /* if free, the size in bytes of the whole block      */ +    struct hblk * hb_next; 	/* Link field for hblk free list	 */ +    				/* and for lists of chunks waiting to be */ +    				/* reclaimed.				 */ +    struct hblk * hb_prev;	/* Backwards link for free list.	*/ +    word hb_descr;   		/* object descriptor for marking.  See	*/ +    				/* mark.h.				*/ +    char* hb_map;	/* A pointer to a pointer validity map of the block. */ +    		      	/* See GC_obj_map.				     */ +    		     	/* Valid for all blocks with headers.		     */ +    		     	/* Free blocks point to GC_invalid_map.		     */ +    unsigned char hb_obj_kind; +    			 /* Kind of objects in the block.  Each kind 	*/ +    			 /* identifies a mark procedure and a set of 	*/ +    			 /* list headers.  Sometimes called regions.	
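			 (An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: how the dirty	*/
+  /* bit and black list code uses a page_hash_table.  The	*/
+  /* address p is assumed.					*/
+  {
+      static page_hash_table pages;	/* starts all zero	*/
+      word index = PHT_HASH(p);
+      set_pht_entry_from_index(pages, index);
+      if (get_pht_entry_from_index(pages, index)) {
+          /* The page containing p is (possibly) in the set;	*/
+          /* spurious hits from collisions are acceptable.	*/
+      }
+  }
+#endif
+/* (End of sketch.)	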
*/ +    unsigned char hb_flags; +#	define IGNORE_OFF_PAGE	1	/* Ignore pointers that do not	*/ +					/* point to the first page of 	*/ +					/* this object.			*/ +#	define WAS_UNMAPPED 2	/* This is a free block, which has	*/ +				/* been unmapped from the address 	*/ +				/* space.				*/ +				/* GC_remap must be invoked on it	*/ +				/* before it can be reallocated.	*/ +				/* Only set with USE_MUNMAP.		*/ +    unsigned short hb_last_reclaimed; +    				/* Value of GC_gc_no when block was	*/ +    				/* last allocated or swept. May wrap.   */ +				/* For a free block, this is maintained */ +				/* unly for USE_MUNMAP, and indicates	*/ +				/* when the header was allocated, or	*/ +				/* when the size of the block last	*/ +				/* changed.				*/ +    word hb_marks[MARK_BITS_SZ]; +			    /* Bit i in the array refers to the             */ +			    /* object starting at the ith word (header      */ +			    /* INCLUDED) in the heap block.                 */ +			    /* The lsb of word 0 is numbered 0.		    */ +}; + +/*  heap block body */ + +# define DISCARD_WORDS 0 +	/* Number of words to be dropped at the beginning of each block	*/ +	/* Must be a multiple of WORDSZ.  May reasonably be nonzero	*/ +	/* on machines that don't guarantee longword alignment of	*/ +	/* pointers, so that the number of false hits is minimized.	*/ +	/* 0 and WORDSZ are probably the only reasonable values.	*/ + +# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word)) + +struct hblk { +#   if (DISCARD_WORDS != 0) +        word garbage[DISCARD_WORDS]; +#   endif +    word hb_body[BODY_SZ]; +}; + +# define HDR_WORDS ((word)DISCARD_WORDS) +# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS)) + +# define OBJ_SZ_TO_BLOCKS(sz) \ +    divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1) +    /* Size of block (in units of HBLKSIZE) needed to hold objects of	*/ +    /* given sz (in words).						*/ + +/* Object free list link */ +# define obj_link(p) (*(ptr_t *)(p)) + +/* The type of mark procedures.  This really belongs in gc_mark.h.	*/ +/* But we put it here, so that we can avoid scanning the mark proc	*/ +/* table.								*/ +typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr, +					  mark_stack_limit, env */); +# define LOG_MAX_MARK_PROCS 6 +# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS) + +/* Root sets.  Logically private to mark_rts.c.  But we don't want the	*/ +/* tables scanned, so we put them here.					*/ +/* MAX_ROOT_SETS is the maximum number of ranges that can be 	*/ +/* registered as static roots. 					*/ +# ifdef LARGE_CONFIG +#   define MAX_ROOT_SETS 4096 +# else +#   ifdef PCR +#     define MAX_ROOT_SETS 1024 +#   else +#     ifdef MSWIN32 +#	define MAX_ROOT_SETS 512 +	    /* Under NT, we add only written pages, which can result 	*/ +	    /* in many small root sets.					*/ +#     else +#       define MAX_ROOT_SETS 64 +#     endif +#   endif +# endif + +# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4) +/* Maximum number of segments that can be excluded from root sets.	*/ + +/* + * Data structure for excluded static roots. + */ +struct exclusion { +    ptr_t e_start; +    ptr_t e_end; +}; + +/* Data structure for list of root sets.				*/ +/* We keep a hash table, so that we can filter out duplicate additions.	*/ +/* Under Win32, we need to do a better job of filtering overlaps, so	*/ +/* we resort to sequential search, and pay the price.			
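	(An illustrative sketch follows.)			*/
+#if 0
+  /* Editor's sketch, not part of this header: free lists are	*/
+  /* threaded through the free objects themselves using the	*/
+  /* obj_link macro above.  obj is an assumed free object.	*/
+  {
+      ptr_t list = 0;		/* empty free list		*/
+      obj_link(obj) = list;	/* push obj			*/
+      list = obj;
+      obj = list;		/* pop it back off		*/
+      list = obj_link(obj);
+  }
+#endif
+/* (End of sketch.)	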
*/ +struct roots { +	ptr_t r_start; +	ptr_t r_end; +#	ifndef MSWIN32 +	  struct roots * r_next; +#	endif +	GC_bool r_tmp; +	  	/* Delete before registering new dynamic libraries */ +}; + +#ifndef MSWIN32 +    /* Size of hash table index to roots.	*/ +#   define LOG_RT_SIZE 6 +#   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */ +#endif + +/* Lists of all heap blocks and free lists	*/ +/* as well as other random data structures	*/ +/* that should not be scanned by the		*/ +/* collector.					*/ +/* These are grouped together in a struct	*/ +/* so that they can be easily skipped by the	*/ +/* GC_mark routine.				*/ +/* The ordering is weird to make GC_malloc	*/ +/* faster by keeping the important fields	*/ +/* sufficiently close together that a		*/ +/* single load of a base register will do.	*/ +/* Scalars that could easily appear to		*/ +/* be pointers are also put here.		*/ +/* The main fields should precede any 		*/ +/* conditionally included fields, so that	*/ +/* gc_inl.h will work even if a different set	*/ +/* of macros is defined when the client is	*/ +/* compiled.					*/ + +struct _GC_arrays { +  word _heapsize; +  word _max_heapsize; +  ptr_t _last_heap_addr; +  ptr_t _prev_heap_addr; +  word _large_free_bytes; +	/* Total bytes contained in blocks on large object free */ +	/* list.						*/ +  word _words_allocd_before_gc; +		/* Number of words allocated before this	*/ +		/* collection cycle.				*/ +  word _words_allocd; +  	/* Number of words allocated during this collection cycle */ +  word _words_wasted; +  	/* Number of words wasted due to internal fragmentation	*/ +  	/* in large objects, or due to dropping blacklisted     */ +	/* blocks, since last gc.  Approximate.                 */ +  word _words_finalized; +  	/* Approximate number of words in objects (and headers)	*/ +  	/* That became ready for finalization in the last 	*/ +  	/* collection.						*/ +  word _non_gc_bytes_at_gc; +  	/* Number of explicitly managed bytes of storage 	*/ +  	/* at last collection.					*/ +  word _mem_freed; +  	/* Number of explicitly deallocated words of memory	*/ +  	/* since last collection.				*/ +  mark_proc _mark_procs[MAX_MARK_PROCS]; +  	/* Table of user-defined mark procedures.  There is	*/ +	/* a small number of these, which can be referenced	*/ +	/* by DS_PROC mark descriptors.  See gc_mark.h.		*/ +  ptr_t _objfreelist[MAXOBJSZ+1]; +			  /* free list for objects */ +  ptr_t _aobjfreelist[MAXOBJSZ+1]; +			  /* free list for atomic objs 	*/ + +  ptr_t _uobjfreelist[MAXOBJSZ+1]; +			  /* uncollectable but traced objs 	*/ +			  /* objects on this and auobjfreelist  */ +			  /* are always marked, except during   */ +			  /* garbage collections.		*/ +# ifdef ATOMIC_UNCOLLECTABLE +    ptr_t _auobjfreelist[MAXOBJSZ+1]; +# endif +			  /* uncollectable but traced objs 	*/ + +# ifdef GATHERSTATS +    word _composite_in_use; +   		/* Number of words in accessible composite	*/ +		/* objects.					*/ +    word _atomic_in_use; +   		/* Number of words in accessible atomic		*/ +		/* objects.					*/ +# endif +# ifdef USE_MUNMAP +    word _unmapped_bytes; +# endif +# ifdef MERGE_SIZES +    unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)]; +    	/* Number of words to allocate for a given allocation request in */ +    	/* bytes.							 
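    	(An illustrative sketch follows.)			 */
+#if 0
+  /* Editor's sketch, not part of this header: with		*/
+  /* MERGE_SIZES the byte-to-word rounding on the fast		*/
+  /* allocation path is a single lookup in this table		*/
+  /* (via the GC_size_map shorthand defined below):		*/
+  word lw = GC_size_map[10];	/* words used for a 10 byte request */
+#endif
+/* (End of sketch.)	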
*/ +# endif  + +# ifdef STUBBORN_ALLOC +    ptr_t _sobjfreelist[MAXOBJSZ+1]; +# endif +  			  /* free list for immutable objects	*/ +  ptr_t _obj_map[MAXOBJSZ+1]; +                       /* If not NIL, then a pointer to a map of valid  */ +    		       /* object addresses. _obj_map[sz][i] is j if the	*/ +    		       /* address block_start+i is a valid pointer      */ +    		       /* to an object at				*/ +    		       /* block_start+i&~3 - WORDS_TO_BYTES(j).		*/ +    		       /* (If ALL_INTERIOR_POINTERS is defined, then	*/ +    		       /* instead ((short *)(hb_map[sz])[i] is j if	*/ +    		       /* block_start+WORDS_TO_BYTES(i) is in the	*/ +    		       /* interior of an object starting at		*/ +    		       /* block_start+WORDS_TO_BYTES(i-j)).		*/ +    		       /* It is OBJ_INVALID if				*/ +    		       /* block_start+WORDS_TO_BYTES(i) is not		*/ +    		       /* valid as a pointer to an object.              */ +    		       /* We assume all values of j <= OBJ_INVALID.	*/ +    		       /* The zeroth entry corresponds to large objects.*/ +#   ifdef ALL_INTERIOR_POINTERS +#	define map_entry_type short +#       define OBJ_INVALID 0x7fff +#	define MAP_ENTRY(map, bytes) \ +		(((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)]) +#	define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE) +#	define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type)) +#	define OFFSET_VALID(displ) TRUE +#	define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1) +#	define MAX_OFFSET ((word)CPP_MAX_OFFSET) +#   else +#	define map_entry_type char +#       define OBJ_INVALID 0x7f +#	define MAP_ENTRY(map, bytes) \ +		(map)[bytes] +#	define MAP_ENTRIES HBLKSIZE +#	define MAP_SIZE MAP_ENTRIES +#	define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)	 +#	define MAX_OFFSET ((word)CPP_MAX_OFFSET) +# 	define VALID_OFFSET_SZ \ +	  (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \ +	   CPP_MAX_OFFSET+1 \ +	   : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1) +  	char _valid_offsets[VALID_OFFSET_SZ]; +				/* GC_valid_offsets[i] == TRUE ==> i 	*/ +				/* is registered as a displacement.	*/ +#	define OFFSET_VALID(displ) GC_valid_offsets[displ] +  	char _modws_valid_offsets[sizeof(word)]; +				/* GC_valid_offsets[i] ==>		  */ +				/* GC_modws_valid_offsets[i%sizeof(word)] */ +#   endif +# ifdef STUBBORN_ALLOC +    page_hash_table _changed_pages; +        /* Stubborn object pages that were changes since last call to	*/ +	/* GC_read_changed.						*/ +    page_hash_table _prev_changed_pages; +        /* Stubborn object pages that were changes before last call to	*/ +	/* GC_read_changed.						*/ +# endif +# if defined(PROC_VDB) || defined(MPROTECT_VDB) +    page_hash_table _grungy_pages; /* Pages that were dirty at last 	   */ +				     /* GC_read_dirty.			   */ +# endif +# ifdef MPROTECT_VDB +    VOLATILE page_hash_table _dirty_pages;	 +			/* Pages dirtied since last GC_read_dirty. */ +# endif +# ifdef PROC_VDB +    page_hash_table _written_pages;	/* Pages ever dirtied	*/ +# endif +# ifdef LARGE_CONFIG +#   if CPP_WORDSZ > 32 +#     define MAX_HEAP_SECTS 4096 	/* overflows at roughly 64 GB	   */ +#   else +#     define MAX_HEAP_SECTS 768		/* Separately added heap sections. */ +#   endif +# else +#   define MAX_HEAP_SECTS 256 +# endif +  struct HeapSect { +      ptr_t hs_start; word hs_bytes; +  } _heap_sects[MAX_HEAP_SECTS]; +# ifdef MSWIN32 +    ptr_t _heap_bases[MAX_HEAP_SECTS]; +    		/* Start address of memory regions obtained from kernel. 
*/ +# endif +  struct roots _static_roots[MAX_ROOT_SETS]; +# ifndef MSWIN32 +    struct roots * _root_index[RT_SIZE]; +# endif +  struct exclusion _excl_table[MAX_EXCLUSIONS]; +  /* Block header index; see gc_headers.h */ +  bottom_index * _all_nils; +  bottom_index * _top_index [TOP_SZ]; +#ifdef SAVE_CALL_CHAIN +  struct callinfo _last_stack[NFRAMES];	/* Stack at last garbage collection.*/ +  					/* Useful for debugging	mysterious  */ +  					/* object disappearances.	    */ +  					/* In the multithreaded case, we    */ +  					/* currently only save the calling  */ +  					/* stack.			    */ +#endif +}; + +GC_API GC_FAR struct _GC_arrays GC_arrays;  + +# define GC_objfreelist GC_arrays._objfreelist +# define GC_aobjfreelist GC_arrays._aobjfreelist +# define GC_uobjfreelist GC_arrays._uobjfreelist +# ifdef ATOMIC_UNCOLLECTABLE +#   define GC_auobjfreelist GC_arrays._auobjfreelist +# endif +# define GC_sobjfreelist GC_arrays._sobjfreelist +# define GC_valid_offsets GC_arrays._valid_offsets +# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets +# ifdef STUBBORN_ALLOC +#    define GC_changed_pages GC_arrays._changed_pages +#    define GC_prev_changed_pages GC_arrays._prev_changed_pages +# endif +# define GC_obj_map GC_arrays._obj_map +# define GC_last_heap_addr GC_arrays._last_heap_addr +# define GC_prev_heap_addr GC_arrays._prev_heap_addr +# define GC_words_allocd GC_arrays._words_allocd +# define GC_words_wasted GC_arrays._words_wasted +# define GC_large_free_bytes GC_arrays._large_free_bytes +# define GC_words_finalized GC_arrays._words_finalized +# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc +# define GC_mem_freed GC_arrays._mem_freed +# define GC_mark_procs GC_arrays._mark_procs +# define GC_heapsize GC_arrays._heapsize +# define GC_max_heapsize GC_arrays._max_heapsize +# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc +# define GC_heap_sects GC_arrays._heap_sects +# define GC_last_stack GC_arrays._last_stack +# ifdef USE_MUNMAP +#   define GC_unmapped_bytes GC_arrays._unmapped_bytes +# endif +# ifdef MSWIN32 +#   define GC_heap_bases GC_arrays._heap_bases +# endif +# define GC_static_roots GC_arrays._static_roots +# define GC_root_index GC_arrays._root_index +# define GC_excl_table GC_arrays._excl_table +# define GC_all_nils GC_arrays._all_nils +# define GC_top_index GC_arrays._top_index +# if defined(PROC_VDB) || defined(MPROTECT_VDB) +#   define GC_grungy_pages GC_arrays._grungy_pages +# endif +# ifdef MPROTECT_VDB +#   define GC_dirty_pages GC_arrays._dirty_pages +# endif +# ifdef PROC_VDB +#   define GC_written_pages GC_arrays._written_pages +# endif +# ifdef GATHERSTATS +#   define GC_composite_in_use GC_arrays._composite_in_use +#   define GC_atomic_in_use GC_arrays._atomic_in_use +# endif +# ifdef MERGE_SIZES +#   define GC_size_map GC_arrays._size_map +# endif + +# define beginGC_arrays ((ptr_t)(&GC_arrays)) +# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays)) + +/* Object kinds: */ +# define MAXOBJKINDS 16 + +extern struct obj_kind { +   ptr_t *ok_freelist;	/* Array of free listheaders for this kind of object */ +   			/* Point either to GC_arrays or to storage allocated */ +   			/* with GC_scratch_alloc.			     */ +   struct hblk **ok_reclaim_list; +   			/* List headers for lists of blocks waiting to be */ +   			/* swept.					  */ +   word ok_descriptor;  /* Descriptor template for objects in this	*/ +   			/* block.					
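block.  (An illustrative sketch follows.)			*/
+#if 0
+  /* Editor's sketch, not part of this header: the small	*/
+  /* object allocation fast path in terms of these structures	*/
+  /* (cf. GC_generic_malloc_inner); lw is a size in words and	*/
+  /* NORMAL is the kind defined below.				*/
+  {
+      word lw = 2;
+      ptr_t *opp = &(GC_objfreelist[lw]);
+      ptr_t op = *opp;
+      if (op == 0) op = GC_allocobj(lw, NORMAL);  /* refill	*/
+      *opp = obj_link(op);	/* unlink the head object	*/
+      obj_link(op) = 0;
+  }
+#endif
+/* (End of sketch.)	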
*/ +   GC_bool ok_relocate_descr; +   			/* Add object size in bytes to descriptor 	*/ +   			/* template to obtain descriptor.  Otherwise	*/ +   			/* template is used as is.			*/ +   GC_bool ok_init;   /* Clear objects before putting them on the free list. */ +} GC_obj_kinds[MAXOBJKINDS]; + +# define endGC_obj_kinds (((ptr_t)(&GC_obj_kinds)) + (sizeof GC_obj_kinds)) + +# define end_gc_area ((ptr_t)endGC_arrays == (ptr_t)(&GC_obj_kinds) ? \ +			endGC_obj_kinds : endGC_arrays) + +/* Predefined kinds: */ +# define PTRFREE 0 +# define NORMAL  1 +# define UNCOLLECTABLE 2 +# ifdef ATOMIC_UNCOLLECTABLE +#   define AUNCOLLECTABLE 3 +#   define STUBBORN 4 +#   define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE) +# else +#   define STUBBORN 3 +#   define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE) +# endif + +extern int GC_n_kinds; + +GC_API word GC_fo_entries; + +extern word GC_n_heap_sects;	/* Number of separately added heap	*/ +				/* sections.				*/ + +extern word GC_page_size; + +# ifdef MSWIN32 +extern word GC_n_heap_bases;	/* See GC_heap_bases.	*/ +# endif + +extern word GC_total_stack_black_listed; +			/* Number of bytes on stack blacklist. 	*/ + +extern word GC_black_list_spacing; +			/* Average number of bytes between blacklisted	*/ +			/* blocks. Approximate.				*/ +			/* Counts only blocks that are 			*/ +			/* "stack-blacklisted", i.e. that are 		*/ +			/* problematic in the interior of an object.	*/ + +extern char * GC_invalid_map; +			/* Pointer to the nowhere valid hblk map */ +			/* Blocks pointing to this map are free. */ + +extern struct hblk * GC_hblkfreelist[]; +				/* List of completely empty heap blocks	*/ +				/* Linked through hb_next field of 	*/ +				/* header structure associated with	*/ +				/* block.				*/ + +extern GC_bool GC_is_initialized;	/* GC_init() has been run.	*/ + +extern GC_bool GC_objects_are_marked;	/* There are marked objects in  */ +					/* the heap.			*/ + +#ifndef SMALL_CONFIG +  extern GC_bool GC_incremental; +			/* Using incremental/generational collection. */ +#else +# define GC_incremental TRUE +			/* Hopefully allow optimizer to remove some code. */ +#endif + +extern GC_bool GC_dirty_maintained; +				/* Dirty bits are being maintained, 	*/ +				/* either for incremental collection,	*/ +				/* or to limit the root set.		*/ + +extern word GC_root_size;	/* Total size of registered root sections */ + +extern GC_bool GC_debugging_started;	/* GC_debug_malloc has been called. */  + +extern ptr_t GC_least_plausible_heap_addr; +extern ptr_t GC_greatest_plausible_heap_addr; +			/* Bounds on the heap.  Guaranteed valid	*/ +			/* Likely to include future heap expansion.	*/ +			 +/* Operations */ +# ifndef abs +#   define abs(x)  ((x) < 0? (-(x)) : (x)) +# endif + + +/*  Marks are in a reserved area in                          */ +/*  each heap block.  Each word has one mark bit associated  */ +/*  with it. Only those corresponding to the beginning of an */ +/*  object are used.                                         */ + + +/* Mark bit operations */ + +/* + * Retrieve, set, clear the mark bit corresponding + * to the nth word in a given heap block. 
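+ *
+ * An illustrative use (editor's sketch; hhdr is assumed to be the
+ * header of the block containing the nth word):
+ *
+ *	if (!mark_bit_from_hdr(hhdr, n)) {
+ *	    set_mark_bit_from_hdr(hhdr, n);    -- object newly reached
+ *	}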
+ *
+ * (Recall that bit n corresponds to the object beginning at word n
+ * relative to the beginning of the block, including unused words.)
+ */
+
+# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
+			    >> (modWORDSZ(n))) & (word)1)
+# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+				|= (word)1 << modWORDSZ(n)
+
+# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+				&= ~((word)1 << modWORDSZ(n))
+
+/* Important internal collector routines */
+
+ptr_t GC_approx_sp();
+
+GC_bool GC_should_collect();
+#ifdef PRESERVE_LAST
+    GC_bool GC_in_last_heap_sect(/* ptr_t */);
+	/* In last added heap section?  If so, avoid breaking up.	*/
+#endif
+void GC_apply_to_all_blocks(/*fn, client_data*/);
+			/* Invoke fn(hbp, client_data) for each 	*/
+			/* allocated heap block.			*/
+struct hblk * GC_next_used_block(/* struct hblk * h */);
+			/* Return first in-use block >= h	*/
+struct hblk * GC_prev_block(/* struct hblk * h */);
+			/* Return last block <= h.  Returned block	*/
+			/* is managed by GC, but may or may not be in	*/
+			/* use.						*/
+void GC_mark_init();
+void GC_clear_marks();	/* Clear mark bits for all heap objects. */
+void GC_invalidate_mark_state();	/* Tell the marker that	marked 	   */
+					/* objects may point to	unmarked   */
+					/* ones, and roots may point to	   */
+					/* unmarked objects.		   */
+					/* Reset mark stack.		   */
+void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
+				/* Return after about one page's worth of  */
+				/* work.				   */
+GC_bool GC_mark_stack_empty();
+GC_bool GC_mark_some(/* cold_gc_frame */);
+			/* Perform about one page's worth of marking	*/
+			/* work of whatever kind is needed.  Returns	*/
+			/* quickly if no collection is in progress.	*/
+			/* Return TRUE if mark phase finished.		*/
+void GC_initiate_gc();		/* Initiate collection.			*/
+				/* If the mark state is invalid, this	*/
+				/* becomes a full collection.  Otherwise */
+				/* it's partial.			*/
+void GC_push_all(/*b,t*/);	/* Push everything in a range 		*/
+				/* onto mark stack.			*/
+void GC_push_dirty(/*b,t*/);      /* Push all possibly changed	 	*/
+				  /* subintervals of [b,t) onto		*/
+				  /* mark stack.			*/
+#ifndef SMALL_CONFIG
+  void GC_push_conditional(/* ptr_t b, ptr_t t, GC_bool all*/);
+#else
+# define GC_push_conditional(b, t, all) GC_push_all(b, t)
+#endif
+                                /* Do either of the above, depending	*/
+				/* on the third arg.			*/
+void GC_push_all_stack(/*b,t*/);    /* As above, but consider		*/
+				    /*  interior pointers as valid  	*/
+void GC_push_all_eager(/*b,t*/);    /* Same as GC_push_all_stack, but   */
+				    /* ensures that stack is scanned	*/
+				    /* immediately, not just scheduled  */
+				    /* for scanning.			*/
+#ifndef THREADS
+  void GC_push_all_stack_partially_eager(/* bottom, top, cold_gc_frame */);
+			/* Similar to GC_push_all_eager, but only the	*/
+			/* part hotter than cold_gc_frame is scanned	*/
+			/* immediately.  Needed to ensure that callee-	*/
+			/* save registers are not missed.		*/
+#else
+  /* In the threads case, we push part of the current thread stack	*/
+  /* with GC_push_all_eager when we push the registers.  This gets the  */
+  /* callee-save registers that may disappear.  The remainder of the	*/
+  /* stacks are scheduled for scanning in *GC_push_other_roots, which	*/
+  /* is thread-package-specific.					
*/
+#endif
+void GC_push_current_stack(/* ptr_t cold_gc_frame */);
+			/* Push enough of the current stack eagerly to	*/
+			/* ensure that callee-save registers saved in	*/
+			/* GC frames are scanned.			*/
+			/* In the non-threads case, schedule entire	*/
+			/* stack for scanning.				*/
+void GC_push_roots(/* GC_bool all, ptr_t cold_gc_frame */);
+			/* Push all or dirty roots.	*/
+extern void (*GC_push_other_roots)();
+			/* Push system or application specific roots	*/
+			/* onto the mark stack.  In some environments	*/
+			/* (e.g. threads environments) this is		*/
+			/* predefined to be non-zero.  A client-supplied */
+			/* replacement should also call the original	*/
+			/* function.					*/
+extern void (*GC_start_call_back)(/* void */);
+			/* Called at start of full collections.		*/
+			/* Not called if 0.  Called with allocation 	*/
+			/* lock held.					*/
+			/* 0 by default.				*/
+void GC_push_regs();	/* Push register contents onto mark stack.	*/
+void GC_remark();	/* Mark from all marked objects.  Used	*/
+		 	/* only if we had to drop something.	*/
+# if defined(MSWIN32)
+  void __cdecl GC_push_one();
+# else
+  void GC_push_one(/*p*/);    /* If p points to an object, mark it    */
+                              /* and push contents on the mark stack  */
+# endif
+void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test	*/
+void GC_push_marked(/* struct hblk h, hdr * hhdr */);
+		/* Push contents of all marked objects in h onto	*/
+		/* mark stack.						*/
+#ifdef SMALL_CONFIG
+# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
+#else
+  struct hblk * GC_push_next_marked_dirty(/* h */);
+		/* Invoke GC_push_marked on next dirty block above h.	*/
+		/* Return a pointer just past the end of this block.	*/
+#endif /* !SMALL_CONFIG */
+struct hblk * GC_push_next_marked(/* h */);
+		/* Ditto, but also mark from clean pages.	*/
+struct hblk * GC_push_next_marked_uncollectable(/* h */);
+		/* Ditto, but mark only from uncollectable pages.	*/
+GC_bool GC_stopped_mark(); /* Stop world and mark from all roots	*/
+			/* and rescuers.			*/
+void GC_clear_hdr_marks(/* hhdr */);  /* Clear the mark bits in a header */
+void GC_set_hdr_marks(/* hhdr */);  /* Set the mark bits in a header */
+void GC_add_roots_inner();
+GC_bool GC_is_static_root(/* ptr_t p */);
+		/* Is the address p in one of the registered static	*/
+		/* root sections?					*/
+void GC_register_dynamic_libraries();
+		/* Add dynamic library data sections to the root set. 
*/ + +/* Machine dependent startup routines */ +ptr_t GC_get_stack_base(); +void GC_register_data_segments(); + +/* Black listing: */ +void GC_bl_init(); 	 +# ifndef ALL_INTERIOR_POINTERS +    void GC_add_to_black_list_normal(/* bits, maybe source */); +			/* Register bits as a possible future false	*/ +			/* reference from the heap or static data	*/ +#   ifdef PRINT_BLACK_LIST +#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ +			GC_add_to_black_list_normal(bits, source) +#   else +#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ +			GC_add_to_black_list_normal(bits) +#   endif +# else +#   ifdef PRINT_BLACK_LIST +#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ +			GC_add_to_black_list_stack(bits, source) +#   else +#     define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ +			GC_add_to_black_list_stack(bits) +#   endif +# endif + +void GC_add_to_black_list_stack(/* bits, maybe source */); +struct hblk * GC_is_black_listed(/* h, len */); +			/* If there are likely to be false references	*/ +			/* to a block starting at h of the indicated    */ +			/* length, then return the next plausible	*/ +			/* starting location for h that might avoid	*/ +			/* these false references.			*/ +void GC_promote_black_lists(); +			/* Declare an end to a black listing phase.	*/ +void GC_unpromote_black_lists(); +			/* Approximately undo the effect of the above.	*/ +			/* This actually loses some information, but	*/ +			/* only in a reasonably safe way.		*/ +word GC_number_stack_black_listed(/*struct hblk *start, struct hblk *endp1 */); +			/* Return the number of (stack) blacklisted	*/ +			/* blocks in the range for statistical		*/ +			/* purposes.					*/ +		 	 +ptr_t GC_scratch_alloc(/*bytes*/); +				/* GC internal memory allocation for	*/ +				/* small objects.  Deallocation is not  */ +				/* possible.				*/ +	 +/* Heap block layout maps: */			 +void GC_invalidate_map(/* hdr */); +				/* Remove the object map associated	*/ +				/* with the block.  This identifies	*/ +				/* the block as invalid to the mark	*/ +				/* routines.				*/ +GC_bool GC_add_map_entry(/*sz*/); +				/* Add a heap block map for objects of	*/ +				/* size sz to obj_map.			*/ +				/* Return FALSE on failure.		*/ +void GC_register_displacement_inner(/*offset*/); +				/* Version of GC_register_displacement	*/ +				/* that assumes lock is already held	*/ +				/* and signals are already disabled.	*/ + +/*  hblk allocation: */		 +void GC_new_hblk(/*size_in_words, kind*/); +				/* Allocate a new heap block, and build */ +				/* a free list in it.			*/				 +struct hblk * GC_allochblk(/*size_in_words, kind*/); +				/* Allocate a heap block, clear it if	*/ +				/* for composite objects, inform	*/ +				/* the marker that block is valid	*/ +				/* for objects of indicated size.	*/ +				/* sz < 0 ==> atomic.			*/  +void GC_freehblk();		/* Deallocate a heap block and mark it  */ +				/* as invalid.				*/ +				 +/*  Misc GC: */ +void GC_init_inner(); +GC_bool GC_expand_hp_inner(); +void GC_start_reclaim(/*abort_if_found*/); +				/* Restore unmarked objects to free	*/ +				/* lists, or (if abort_if_found is	*/ +				/* TRUE) report them.			*/ +				/* Sweeping of small object pages is	*/ +				/* largely deferred.			*/ +void GC_continue_reclaim(/*size, kind*/); +				/* Sweep pages of the given size and	*/ +				/* kind, as long as possible, and	*/ +				/* as long as the corr. free list is    */ +				/* empty.				*/ +void GC_reclaim_or_delete_all(); +				/* Arrange for all reclaim lists to be	*/ +				/* empty.  
Judiciously choose between	*/
+				/* sweeping and discarding each page.	*/
+GC_bool GC_reclaim_all(/* GC_stop_func f*/);
+				/* Reclaim all blocks.  Abort (in a	*/
+				/* consistent state) if f returns TRUE. */
+GC_bool GC_block_empty(/* hhdr */); /* Block completely unmarked? 	*/
+GC_bool GC_never_stop_func();	/* Returns FALSE.		*/
+GC_bool GC_try_to_collect_inner(/* GC_stop_func f */);
+				/* Collect; caller must have acquired	*/
+				/* lock and disabled signals.		*/
+				/* Collection is aborted if f returns	*/
+				/* TRUE.  Returns TRUE if it completes	*/
+				/* successfully.			*/
+# define GC_gcollect_inner() \
+	(void) GC_try_to_collect_inner(GC_never_stop_func)
+void GC_finish_collection();	/* Finish collection.  Mark bits are	*/
+				/* consistent and lock is still held.	*/
+GC_bool GC_collect_or_expand(/* needed_blocks */);
+				/* Collect or expand heap in an attempt */
+				/* to make the indicated number of free	*/
+				/* blocks available.  Should be called	*/
+				/* until the blocks are available or	*/
+				/* until it fails by returning FALSE.	*/
+GC_API void GC_init();		/* Initialize collector.		*/
+void GC_collect_a_little_inner(/* int n */);
+				/* Do n units' worth of garbage 	*/
+				/* collection work, if appropriate.	*/
+				/* A unit is an amount appropriate for  */
+				/* HBLKSIZE bytes of allocation.	*/
+ptr_t GC_generic_malloc(/* bytes, kind */);
+				/* Allocate an object of the given	*/
+				/* kind.  By default, there are only	*/
+				/* a few kinds: composite (pointer-free), */
+				/* atomic, uncollectable, etc.		*/
+				/* We claim it's possible for clever	*/
+				/* client code that understands GC	*/
+				/* internals to add more, e.g. to	*/
+				/* communicate object layout info	*/
+				/* to the collector.			*/
+ptr_t GC_generic_malloc_ignore_off_page(/* bytes, kind */);
+				/* As above, but pointers past the 	*/
+				/* first page of the resulting object	*/
+				/* are ignored.				*/
+ptr_t GC_generic_malloc_inner(/* bytes, kind */);
+				/* Ditto, but I already hold lock, etc.	*/
+ptr_t GC_generic_malloc_words_small GC_PROTO((size_t words, int kind));
+				/* As above, but size in units of words */
+				/* Bypasses MERGE_SIZES.  Assumes	*/
+				/* words <= MAXOBJSZ.			*/
+ptr_t GC_generic_malloc_inner_ignore_off_page(/* bytes, kind */);
+				/* Allocate an object, where		*/
+				/* the client guarantees that there	*/
+				/* will always be a pointer to the 	*/
+				/* beginning of the object while the	*/
+				/* object is live.			*/
+ptr_t GC_allocobj(/* sz_in_words, kind */);
+				/* Make the indicated 			*/
+				/* free list nonempty, and return its	*/
+				/* head.				*/
+
+void GC_init_headers();
+GC_bool GC_install_header(/*h*/);
+				/* Install a header for block h.	*/
+				/* Return FALSE on failure.		*/
+GC_bool GC_install_counts(/*h, sz*/);
+				/* Set up forwarding counts for block	*/
+				/* h of size sz.			*/
+				/* Return FALSE on failure.		*/
+void GC_remove_header(/*h*/);
+				/* Remove the header for block h.	*/
+void GC_remove_counts(/*h, sz*/);
+				/* Remove forwarding counts for h.	*/
+hdr * GC_find_header(/*p*/);	/* Debugging only.			*/
+
+void GC_finalize();	/* Perform all indicated finalization actions	*/
+			/* on unmarked objects.				*/
+			/* Unreachable finalizable objects are enqueued	*/
+			/* for processing by GC_invoke_finalizers.	*/
+			/* Invoked with lock.				*/
+			
+void GC_add_to_heap(/*p, bytes*/);
+			/* Add a HBLKSIZE aligned chunk to the heap.	
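			(An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: the public	*/
+  /* allocators reduce to GC_generic_malloc with a predefined	*/
+  /* kind (op and lb are assumed locals, as in the .c files):	*/
+  op = GC_generic_malloc((word)lb, NORMAL);	/* GC_malloc	    */
+  op = GC_generic_malloc((word)lb, PTRFREE);	/* GC_malloc_atomic */
+#endif
+/* (End of sketch.)	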
*/ + +void GC_print_obj(/* ptr_t p */); +			/* P points to somewhere inside an object with	*/ +			/* debugging info.  Print a human readable	*/ +			/* description of the object to stderr.		*/ +extern void (*GC_check_heap)(); +			/* Check that all objects in the heap with 	*/ +			/* debugging info are intact.  Print 		*/ +			/* descriptions of any that are not.		*/ +extern void (*GC_print_heap_obj)(/* ptr_t p */); +			/* If possible print s followed by a more	*/ +			/* detailed description of the object 		*/ +			/* referred to by p.				*/ +			 +/* Memory unmapping: */ +#ifdef USE_MUNMAP +  void GC_unmap_old(void); +  void GC_merge_unmapped(void); +  void GC_unmap(ptr_t start, word bytes); +  void GC_remap(ptr_t start, word bytes); +  void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2); +#endif + +/* Virtual dirty bit implementation:		*/ +/* Each implementation exports the following:	*/ +void GC_read_dirty();	/* Retrieve dirty bits.	*/ +GC_bool GC_page_was_dirty(/* struct hblk * h  */); +			/* Read retrieved dirty bits.	*/ +GC_bool GC_page_was_ever_dirty(/* struct hblk * h  */); +			/* Could the page contain valid heap pointers?	*/ +void GC_is_fresh(/* struct hblk * h, word number_of_blocks  */); +			/* Assert the region currently contains no	*/ +			/* valid pointers.				*/ +void GC_write_hint(/* struct hblk * h  */); +			/* h is about to be written.	*/ +void GC_dirty_init(); + +/* Slow/general mark bit manipulation: */ +GC_API GC_bool GC_is_marked(); +void GC_clear_mark_bit(); +void GC_set_mark_bit(); + +/* Stubborn objects: */ +void GC_read_changed();	/* Analogous to GC_read_dirty */ +GC_bool GC_page_was_changed(/* h */);	/* Analogous to GC_page_was_dirty */ +void GC_clean_changing_list();	/* Collect obsolete changing list entries */ +void GC_stubborn_init(); + +/* Debugging print routines: */ +void GC_print_block_list(); +void GC_print_hblkfreelist(); +void GC_print_heap_sects(); +void GC_print_static_roots(); +void GC_dump(); + +#ifdef KEEP_BACK_PTRS +   void GC_store_back_pointer(ptr_t source, ptr_t dest); +   void GC_marked_for_finalization(ptr_t dest); +#  define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest) +#  define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest) +#else +#  define GC_STORE_BACK_PTR(source, dest)  +#  define GC_MARKED_FOR_FINALIZATION(dest) +#endif + +/* Make arguments appear live to compiler */ +# ifdef __WATCOMC__ +  void GC_noop(void*, ...); +# else +  GC_API void GC_noop(); +# endif + +void GC_noop1(/* word arg */); + +/* Logging and diagnostic output: 	*/ +GC_API void GC_printf GC_PROTO((char * format, long, long, long, long, long, long)); +			/* A version of printf that doesn't allocate,	*/ +			/* is restricted to long arguments, and		*/ +			/* (unfortunately) doesn't use varargs for	*/ +			/* portability.  Restricted to 6 args and	*/ +			/* 1K total output length.			*/ +			/* (We use sprintf.  Hopefully that doesn't	*/ +			/* allocate for long arguments.)  		
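			(An illustrative sketch follows.)	*/
+#if 0
+  /* Editor's sketch, not part of this header: the GC_printfN	*/
+  /* macros below cast each argument to long themselves, e.g.:	*/
+  GC_printf2("Heap size: %lu bytes in %lu sections\n",
+	      GC_heapsize, GC_n_heap_sects);
+#endif
+/* (End of sketch.)	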
*/ +# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l) +# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l) +# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l) +# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l) +# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \ +					    (long)d, 0l, 0l) +# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \ +					      (long)d, (long)e, 0l) +# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \ +						(long)d, (long)e, (long)g) + +void GC_err_printf(/* format, a, b, c, d, e, f */); +# define GC_err_printf0(f) GC_err_puts(f) +# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l) +# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l) +# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \ +						  0l, 0l, 0l) +# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \ +						    (long)c, (long)d, 0l, 0l) +# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \ +						      (long)c, (long)d, \ +						      (long)e, 0l) +# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \ +							(long)c, (long)d, \ +							(long)e, (long)g) +			/* Ditto, writes to stderr.			*/ +			 +void GC_err_puts(/* char *s */); +			/* Write s to stderr, don't buffer, don't add	*/ +			/* newlines, don't ...				*/ + + +#   ifdef GC_ASSERTIONS +#	define GC_ASSERT(expr) if(!(expr)) {\ +		GC_err_printf2("Assertion failure: %s:%ld\n", \ +				__FILE__, (unsigned long)__LINE__); \ +		ABORT("assertion failure"); } +#   else  +#	define GC_ASSERT(expr) +#   endif + +# endif /* GC_PRIVATE_H */ diff --git a/gc/include/private/gcconfig.h b/gc/include/private/gcconfig.h new file mode 100644 index 0000000..c9017d3 --- /dev/null +++ b/gc/include/private/gcconfig.h @@ -0,0 +1,1099 @@ +/*  + * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers + * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved. + * Copyright (c) 1996 by Silicon Graphics.  All rights reserved. + * + * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED + * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + * + * Permission is hereby granted to use or copy this program + * for any purpose,  provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. + */ +  +#ifndef CONFIG_H + +# define CONFIG_H + +/* Machine dependent parameters.  Some tuning parameters can be found	*/ +/* near the top of gc_private.h.					*/ + +/* Machine specific parts contributed by various people.  See README file. 
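   (An illustrative sketch follows.)	*/
+#if 0
+/* Editor's sketch, not part of this header: a hand-added	*/
+/* clause has the same shape as the tests below.  __myarch__,	*/
+/* MYARCH and MYOS are placeholders.				*/
+# if defined(__myarch__)
+#   define MYARCH
+#   define MYOS
+#   define mach_type_known
+# endif
+#endif
+/* (End of sketch.)	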
*/ + +/* First a unified test for Linux: */ +# if defined(linux) || defined(__linux__) +#    define LINUX +# endif + +/* Determine the machine type: */ +# if defined(sun) && defined(mc68000) +#    define M68K +#    define SUNOS4 +#    define mach_type_known +# endif +# if defined(hp9000s300) +#    define M68K +#    define HP +#    define mach_type_known +# endif +# if defined(__OpenBSD__) && defined(m68k) +#    define M68K +#    define OPENBSD +#    define mach_type_known +# endif +# if defined(__OpenBSD__) && defined(__sparc__) +#    define SPARC +#    define OPENBSD +#    define mach_type_known +# endif +# if defined(__NetBSD__) && defined(m68k) +#    define M68K +#    define NETBSD +#    define mach_type_known +# endif +# if defined(vax) +#    define VAX +#    ifdef ultrix +#	define ULTRIX +#    else +#	define BSD +#    endif +#    define mach_type_known +# endif +# if defined(mips) || defined(__mips) +#    define MIPS +#    if defined(ultrix) || defined(__ultrix) || defined(__NetBSD__) +#	define ULTRIX +#    else +#	if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__) +#	  define IRIX5   /* or IRIX 6.X */ +#	else +#	  define RISCOS  /* or IRIX 4.X */ +#	endif +#    endif +#    define mach_type_known +# endif +# if defined(sequent) && defined(i386) +#    define I386 +#    define SEQUENT +#    define mach_type_known +# endif +# if defined(sun) && defined(i386) +#    define I386 +#    define SUNOS5 +#    define mach_type_known +# endif +# if (defined(__OS2__) || defined(__EMX__)) && defined(__32BIT__) +#    define I386 +#    define OS2 +#    define mach_type_known +# endif +# if defined(ibm032) +#   define RT +#   define mach_type_known +# endif +# if defined(sun) && (defined(sparc) || defined(__sparc)) +#   define SPARC +    /* Test for SunOS 5.x */ +#     include <errno.h> +#     ifdef ECHRNG +#       define SUNOS5 +#     else +#	define SUNOS4 +#     endif +#   define mach_type_known +# endif +# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \ +     && !defined(__OpenBSD__) +#   define SPARC +#   define DRSNX +#   define mach_type_known +# endif +# if defined(_IBMR2) +#   define RS6000 +#   define mach_type_known +# endif +# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386) +	/* The above test may need refinement	*/ +#   define I386 +#   if defined(_SCO_ELF) +#     define SCO_ELF +#   else +#     define SCO +#   endif +#   define mach_type_known +# endif +# if defined(_AUX_SOURCE) +#   define M68K +#   define SYSV +#   define mach_type_known +# endif +# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) \ +     || defined(hppa) || defined(__hppa__) +#   define HP_PA +#   define mach_type_known +# endif +# if defined(LINUX) && (defined(i386) || defined(__i386__)) +#    define I386 +#    define mach_type_known +# endif +# if defined(LINUX) && defined(powerpc) +#    define POWERPC +#    define mach_type_known +# endif +# if defined(LINUX) && defined(__mc68000__) +#    define M68K +#    define mach_type_known +# endif +# if defined(LINUX) && defined(sparc) +#    define SPARC +#    define mach_type_known +# endif +# if defined(__alpha) || defined(__alpha__) +#   define ALPHA +#   if !defined(LINUX) +#     define OSF1	/* a.k.a Digital Unix */ +#   endif +#   define mach_type_known +# endif +# if defined(_AMIGA) && !defined(AMIGA) +#   define AMIGA +# endif +# ifdef AMIGA  +#   define M68K +#   define mach_type_known +# endif +# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc) +#   define M68K +#   define MACOS +#   
define mach_type_known +# endif +# if defined(__MWERKS__) && defined(__powerc) +#   define POWERPC +#   define MACOS +#   define mach_type_known +# endif +# if defined(macosx) +#    define MACOSX +#    define POWERPC +#    define mach_type_known +# endif +# if defined(NeXT) && defined(mc68000) +#   define M68K +#   define NEXT +#   define mach_type_known +# endif +# if defined(NeXT) && defined(i386) +#   define I386 +#   define NEXT +#   define mach_type_known +# endif +# if defined(__OpenBSD__) && defined(i386) +#   define I386 +#   define OPENBSD +#   define mach_type_known +# endif +# if defined(__FreeBSD__) && defined(i386) +#   define I386 +#   define FREEBSD +#   define mach_type_known +# endif +# if defined(__NetBSD__) && defined(i386) +#   define I386 +#   define NETBSD +#   define mach_type_known +# endif +# if defined(bsdi) && defined(i386) +#    define I386 +#    define BSDI +#    define mach_type_known +# endif +# if !defined(mach_type_known) && defined(__386BSD__) +#   define I386 +#   define THREE86BSD +#   define mach_type_known +# endif +# if defined(_CX_UX) && defined(_M88K) +#   define M88K +#   define CX_UX +#   define mach_type_known +# endif +# if defined(DGUX) +#   define M88K +    /* DGUX defined */ +#   define mach_type_known +# endif +# if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \ +     || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__) +#   define I386 +#   define MSWIN32	/* or Win32s */ +#   define mach_type_known +# endif +# if defined(__DJGPP__) +#   define I386 +#   ifndef DJGPP +#     define DJGPP  /* MSDOS running the DJGPP port of GCC */ +#   endif +#   define mach_type_known +# endif +# if defined(__CYGWIN32__) || defined(__CYGWIN__) +#   define I386 +#   define CYGWIN32 +#   define mach_type_known +# endif +# if defined(__BORLANDC__) +#   define I386 +#   define MSWIN32 +#   define mach_type_known +# endif +# if defined(_UTS) && !defined(mach_type_known) +#   define S370 +#   define UTS4 +#   define mach_type_known +# endif +/* Ivan Demakov */ +# if defined(__WATCOMC__) && defined(__386__) +#   define I386 +#   if !defined(OS2) && !defined(MSWIN32) && !defined(DOS4GW) +#     if defined(__OS2__) +#       define OS2 +#     else +#       if defined(__WINDOWS_386__) || defined(__NT__) +#         define MSWIN32 +#       else +#         define DOS4GW +#       endif +#     endif +#   endif +#   define mach_type_known +# endif + +/* Feel free to add more clauses here */ + +/* Or manually define the machine type here.  A machine type is 	*/ +/* characterized by the architecture.  Some				*/ +/* machine types are further subdivided by OS; in those cases we use	*/ +/* the macros ULTRIX, RISCOS, and BSD to distinguish.			*/ +/* Note that SGI IRIX is treated identically to RISCOS.			*/ +/* SYSV on an M68K actually means A/UX.					
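+   For example (an editorial sketch, not part of the original file):
+   a port could be forced by hand by adding
+       # define SPARC
+       # define SUNOS5
+       # define mach_type_known
+   here, bypassing the predefined-macro tests above.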
*/ +/* The distinction in these cases is usually the stack starting address */ +# ifndef mach_type_known +	--> unknown machine type +# endif +		    /* Mapping is: M68K       ==> Motorola 680X0	*/ +		    /*		   (SUNOS4,HP,NEXT, and SYSV (A/UX),	*/ +		    /*		   MACOS and AMIGA variants)		*/ +		    /*             I386       ==> Intel 386	 	*/ +		    /*		    (SEQUENT, OS2, SCO, LINUX, NETBSD,	*/ +		    /*		     FREEBSD, THREE86BSD, MSWIN32,	*/ +		    /* 		     BSDI,SUNOS5, NEXT, other variants)	*/ +                    /*             NS32K      ==> Encore Multimax 	*/ +                    /*             MIPS       ==> R2000 or R3000	*/ +                    /*			(RISCOS, ULTRIX variants)	*/ +                    /*		   VAX	      ==> DEC VAX		*/ +                    /*			(BSD, ULTRIX variants)		*/ +                    /*		   RS6000     ==> IBM RS/6000 AIX3.X	*/ +                    /*		   RT	      ==> IBM PC/RT		*/ +                    /*		   HP_PA      ==> HP9000/700 & /800	*/ +                    /*				  HP/UX			*/ +		    /*		   SPARC      ==> SPARC under SunOS	*/ +		    /*			(SUNOS4, SUNOS5,		*/ +		    /*			 DRSNX variants)		*/ +		    /* 		   ALPHA      ==> DEC Alpha 		*/ +		    /*			(OSF1 and LINUX variants)	*/ +		    /* 		   M88K       ==> Motorola 88XX0        */ +		    /* 		        (CX_UX and DGUX)		*/ +		    /* 		   S370	      ==> 370-like machine	*/ +		    /* 			running Amdahl UTS4		*/ + + +/* + * For each architecture and OS, the following need to be defined: + * + * CPP_WORD_SZ is a simple integer constant representing the word size + * in bits.  We assume byte addressability, where a byte has 8 bits. + * We also assume CPP_WORD_SZ is either 32 or 64. + * (We care about the length of pointers, not hardware + * bus widths.  Thus a 64 bit processor with a C compiler that uses + * 32 bit pointers should use CPP_WORD_SZ of 32, not 64. Default is 32.) + * + * MACH_TYPE is a string representation of the machine type. + * OS_TYPE is analogous for the OS. + * + * ALIGNMENT is the largest N such that + * all pointers are guaranteed to be aligned on N byte boundaries. + * Defining it to be 1 will always work, but performs poorly. + * + * DATASTART is the beginning of the data segment. + * On UNIX systems, the collector will scan the area between DATASTART + * and DATAEND for root pointers. + * + * DATAEND is the end of the data segment; it defaults to &end. + * + * If ALIGN_DOUBLE is defined, GC_malloc should return blocks aligned + * to twice the pointer size. + * + * STACKBOTTOM is the cool end of the stack, which is usually the + * highest address in the stack. + * Under PCR or OS/2, we have other ways of finding thread stacks. + * For each machine, the following should be done: + * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and + * 2) define exactly one of + *	STACKBOTTOM (should be defined to be an expression) + *	HEURISTIC1 + *	HEURISTIC2 + * If either of the last two macros is defined, then STACKBOTTOM is computed + * during collector startup using one of the following two heuristics: + * HEURISTIC1:  Take an address inside GC_init's frame, and round it up to + *		the next multiple of STACK_GRAN. + * HEURISTIC2:  Take an address inside GC_init's frame, increment it repeatedly + *		in small steps (decrement if STACK_GROWS_UP), and read the value + *		at each location.  Remember the value when the first + *		Segmentation violation or Bus error is signalled.  Round that + *		to the nearest plausible page boundary, and use that instead + *		of STACKBOTTOM. 
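+ *		As a worked example (editorial, not from the original
+ *		text): with the default STACK_GRAN of 0x1000000 and a
+ *		frame address of 0xbfffe123, HEURISTIC1 computes
+ *		    (0xbfffe123 + 0xffffff) & ~0xffffff == 0xc0000000,
+ *		the usual stack base on x86 Linux.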
+ * + * If no expression for STACKBOTTOM can be found, and neither of the above + * heuristics are usable, the collector can still be used with all of the above + * undefined, provided one of the following is done: + * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s) + *    without reference to STACKBOTTOM.  This is appropriate for use in + *    conjunction with thread packages, since there will be multiple stacks. + *    (Allocating thread stacks in the heap, and treating them as ordinary + *    heap data objects is also possible as a last resort.  However, this is + *    likely to introduce significant amounts of excess storage retention + *    unless the dead parts of the thread stacks are periodically cleared.) + * 2) Client code may set GC_stackbottom before calling any GC_ routines. + *    If the author of the client code controls the main program, this is + *    easily accomplished by introducing a new main program, setting + *    GC_stackbottom to the address of a local variable, and then calling + *    the original main program.  The new main program would read something + *    like: + * + *		# include "gc_private.h" + * + *		main(argc, argv, envp) + *		int argc; + *		char **argv, **envp; + *		{ + *		    int dummy; + * + *		    GC_stackbottom = (ptr_t)(&dummy); + *		    return(real_main(argc, argv, envp)); + *		} + * + * + * Each architecture may also define the style of virtual dirty bit + * implementation to be used: + *   MPROTECT_VDB: Write protect the heap and catch faults. + *   PROC_VDB: Use the SVR4 /proc primitives to read dirty bits. + * + * An architecture may define DYNAMIC_LOADING if dynamic_load.c + * defined GC_register_dynamic_libraries() for the architecture. + */ + + +# define STACK_GRAN 0x1000000 +# ifdef M68K +#   define MACH_TYPE "M68K" +#   define ALIGNMENT 2 +#   ifdef OPENBSD +#	define OS_TYPE "OPENBSD" +#	define HEURISTIC2 +	extern char etext; +#	define DATASTART ((ptr_t)(&etext)) +#   endif +#   ifdef NETBSD +#	define OS_TYPE "NETBSD" +#	define HEURISTIC2 +	extern char etext; +#	define DATASTART ((ptr_t)(&etext)) +#   endif +#   ifdef LINUX +#       define OS_TYPE "LINUX" +#       define STACKBOTTOM ((ptr_t)0xf0000000) +#       define MPROTECT_VDB +#       ifdef __ELF__ +#            define DYNAMIC_LOADING +             extern char **__environ; +#            define DATASTART ((ptr_t)(&__environ)) +                             /* hideous kludge: __environ is the first */ +                             /* word in crt0.o, and delimits the start */ +                             /* of the data segment, no matter which   */ +                             /* ld options were passed through.        */ +                             /* We could use _etext instead, but that  */ +                             /* would include .rodata, which may       */ +                             /* contain large read-only data tables    */ +                             /* that we'd rather not scan.             
*/ +             extern int _end; +#            define DATAEND (&_end) +#       else +             extern int etext; +#            define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff)) +#       endif +#   endif +#   ifdef SUNOS4 +#	define OS_TYPE "SUNOS4" +	extern char etext; +#	define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ffff) & ~0x1ffff)) +#	define HEURISTIC1	/* differs	*/ +#	define DYNAMIC_LOADING +#   endif +#   ifdef HP +#	define OS_TYPE "HP" +	extern char etext; +#       define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff)) +#       define STACKBOTTOM ((ptr_t) 0xffeffffc) +			      /* empirically determined.  seems to work. */ +#  	include <unistd.h> +#	define GETPAGESIZE() sysconf(_SC_PAGE_SIZE) +#   endif +#   ifdef SYSV +#	define OS_TYPE "SYSV" +	extern etext; +#   	define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \ +				   & ~0x3fffff) \ +				  +((word)&etext & 0x1fff)) +	/* This only works for shared-text binaries with magic number 0413. +	   The other sorts of SysV binaries put the data at the end of the text, +	   in which case the default of &etext would work.  Unfortunately, +	   handling both would require having the magic-number available. +	   	   		-- Parag +	   */ +#	define STACKBOTTOM ((ptr_t)0xFFFFFFFE) +			/* The stack starts at the top of memory, but   */ +			/* 0x0 cannot be used as setjump_test complains */ +			/* that the stack direction is incorrect.  Two  */ +			/* bytes down from 0x0 should be safe enough.   */ +			/* 		--Parag				*/ +#   	include <sys/mmu.h> +#	define GETPAGESIZE() PAGESIZE	/* Is this still right? */ +#   endif +#   ifdef AMIGA +#	define OS_TYPE "AMIGA" + 	    	/* STACKBOTTOM and DATASTART handled specially	*/ + 	    	/* in os_dep.c					*/ +# 	define DATAEND	/* not needed */ +#	define GETPAGESIZE() 4096 +#   endif +#   ifdef MACOS +#     ifndef __LOWMEM__ +#     include <LowMem.h> +#     endif +#     define OS_TYPE "MACOS" +			/* see os_dep.c for details of global data segments. */ +#     define STACKBOTTOM ((ptr_t) LMGetCurStackBase()) +#     define DATAEND	/* not needed */ +#     define GETPAGESIZE() 4096 +#   endif +#   ifdef NEXT +#	define OS_TYPE "NEXT" +#	define DATASTART ((ptr_t) get_etext()) +#	define STACKBOTTOM ((ptr_t) 0x4000000) +#	define DATAEND	/* not needed */ +#   endif +# endif + +# ifdef POWERPC +#   define MACH_TYPE "POWERPC" +#   ifdef MACOS +#     define ALIGNMENT 2  /* Still necessary?  Could it be 4?	*/ +#     ifndef __LOWMEM__ +#     include <LowMem.h> +#     endif +#     define OS_TYPE "MACOS" +			/* see os_dep.c for details of global data segments. */ +#     define STACKBOTTOM ((ptr_t) LMGetCurStackBase()) +#     define DATAEND  /* not needed */ +#   endif +#   ifdef LINUX +#     define ALIGNMENT 4	/* Guess.  Can someone verify?	*/ +				/* This was 2, but that didn't sound right. 
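+				   (Editorial note: ALIGNMENT is the
+				   stride at which the collector looks
+				   for candidate pointers, so too large
+				   a value can miss pointers, while too
+				   small a value merely scans more.)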
*/ +#     define OS_TYPE "LINUX" +#     define HEURISTIC1 +#     undef STACK_GRAN +#     define STACK_GRAN 0x10000000 +	/* Stack usually starts at 0x80000000 */ +#     define DATASTART GC_data_start +      extern int _end; +#     define DATAEND (&_end) +#   endif +#   ifdef MACOSX +#     define ALIGNMENT 4 +#     define OS_TYPE "MACOSX" +#     define DATASTART ((ptr_t) get_etext()) +#     define STACKBOTTOM ((ptr_t) 0xc0000000) +#     define DATAEND	/* not needed */ +#   endif +# endif + +# ifdef VAX +#   define MACH_TYPE "VAX" +#   define ALIGNMENT 4	/* Pointers are longword aligned by 4.2 C compiler */ +    extern char etext; +#   define DATASTART ((ptr_t)(&etext)) +#   ifdef BSD +#	define OS_TYPE "BSD" +#	define HEURISTIC1 +			/* HEURISTIC2 may be OK, but it's hard to test. */ +#   endif +#   ifdef ULTRIX +#	define OS_TYPE "ULTRIX" +#	define STACKBOTTOM ((ptr_t) 0x7fffc800) +#   endif +# endif + +# ifdef RT +#   define MACH_TYPE "RT" +#   define ALIGNMENT 4 +#   define DATASTART ((ptr_t) 0x10000000) +#   define STACKBOTTOM ((ptr_t) 0x1fffd800) +# endif + +# ifdef SPARC +#   define MACH_TYPE "SPARC" +#   define ALIGNMENT 4	/* Required by hardware	*/ +#   define ALIGN_DOUBLE +    extern int etext; +#   ifdef SUNOS5 +#	define OS_TYPE "SUNOS5" +	extern int _etext; +	extern int _end; +	extern char * GC_SysVGetDataStart(); +#       define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext) +#	define DATAEND (&_end) +#	ifndef USE_MMAP +#	    define USE_MMAP +#	endif +#       ifdef USE_MMAP +#         define HEAP_START (ptr_t)0x40000000 +#       else +#	  define HEAP_START DATAEND +#       endif +#	define PROC_VDB +/*	HEURISTIC1 reportedly no longer works under 2.7.  Thus we	*/ +/* 	switched to HEURISTIC2, even though it creates some debugging	*/ +/*	issues.								*/ +#	define HEURISTIC2 +#	include <unistd.h> +#       define GETPAGESIZE()  sysconf(_SC_PAGESIZE) +		/* getpagesize() appeared to be missing from at least one */ +		/* Solaris 5.4 installation.  Weird.			  */ +#	define DYNAMIC_LOADING +#   endif +#   ifdef SUNOS4 +#	define OS_TYPE "SUNOS4" +	/* [If you have a weak stomach, don't read this.]		*/ +	/* We would like to use:					*/ +/* #       define DATASTART ((ptr_t)((((word) (&etext)) + 0x1fff) & ~0x1fff)) */ +	/* This fails occasionally, due to an ancient, but very 	*/ +	/* persistent ld bug.  &etext is set 32 bytes too high.		*/ +	/* We instead read the text segment size from the a.out		*/ +	/* header, which happens to be mapped into our address space	*/ +	/* at the start of the text segment.  The detective work here	*/ +	/* was done by Robert Ehrlich, Manuel Serrano, and Bernard	*/ +	/* Serpette of INRIA.						*/ +	/* This assumes ZMAGIC, i.e. demand-loadable executables.	*/ +#	define TEXTSTART 0x2000 +#       define DATASTART ((ptr_t)(*(int *)(TEXTSTART+0x4)+TEXTSTART)) +#	define MPROTECT_VDB +#	define HEURISTIC1 +# 	define DYNAMIC_LOADING +#   endif +#   ifdef DRSNX +#       define CPP_WORDSZ 32 +#	define OS_TYPE "DRSNX" +	extern char * GC_SysVGetDataStart(); +	extern int etext; +#       define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext) +#	define MPROTECT_VDB +#       define STACKBOTTOM ((ptr_t) 0xdfff0000) +#	define DYNAMIC_LOADING +#   endif +#   ifdef LINUX +#     define OS_TYPE "LINUX" +#     ifdef __ELF__ +#         define DATASTART GC_data_start +#         define DYNAMIC_LOADING +#     else +          Linux Sparc non elf ? 
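+             /* (Editorial note: the bare words above are a deliberate */
+             /* compile-time error, the same trick as the              */
+             /* "--> unknown machine type" line earlier in this file:  */
+             /* an unsupported configuration fails to build here       */
+             /* rather than misbehaving at run time.)                  */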
+#     endif +      extern int _end; +#     define DATAEND (&_end) +#     define SVR4 +#     define STACKBOTTOM ((ptr_t) 0xf0000000) +#   endif +#   ifdef OPENBSD +#     define OS_TYPE "OPENBSD" +#     define STACKBOTTOM ((ptr_t) 0xf8000000) +#     define DATASTART ((ptr_t)(&etext)) +#   endif +# endif + +# ifdef I386 +#   define MACH_TYPE "I386" +#   define ALIGNMENT 4	/* Appears to hold for all "32 bit" compilers	*/ +			/* except Borland.  The -a4 option fixes 	*/ +			/* Borland.					*/ +                        /* Ivan Demakov: For Watcom the option is -zp4. */ +#   ifndef SMALL_CONFIG +#     define ALIGN_DOUBLE /* Not strictly necessary, but may give speed   */ +			  /* improvement on Pentiums.			  */ +#   endif +#   ifdef SEQUENT +#	define OS_TYPE "SEQUENT" +	extern int etext; +#       define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff)) +#       define STACKBOTTOM ((ptr_t) 0x3ffff000)  +#   endif +#   ifdef SUNOS5 +#	define OS_TYPE "SUNOS5" +  	extern int etext, _start; +  	extern char * GC_SysVGetDataStart(); +#       define DATASTART GC_SysVGetDataStart(0x1000, &etext) +#	define STACKBOTTOM ((ptr_t)(&_start)) +/** At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */ +/*#	define PROC_VDB*/ +#	define DYNAMIC_LOADING +#	ifndef USE_MMAP +#	    define USE_MMAP +#	endif +#       ifdef USE_MMAP +#         define HEAP_START (ptr_t)0x40000000 +#       else +#	  define HEAP_START DATAEND +#       endif +#   endif +#   ifdef SCO +#	define OS_TYPE "SCO" +	extern int etext; +#   	define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \ +				  & ~0x3fffff) \ +				 +((word)&etext & 0xfff)) +#	define STACKBOTTOM ((ptr_t) 0x7ffffffc) +#   endif +#   ifdef SCO_ELF +#       define OS_TYPE "SCO_ELF" +        extern int etext; +#       define DATASTART ((ptr_t)(&etext)) +#       define STACKBOTTOM ((ptr_t) 0x08048000) +#       define DYNAMIC_LOADING +#	define ELF_CLASS ELFCLASS32 +#   endif +#   ifdef LINUX +#	define OS_TYPE "LINUX" +#       define HEURISTIC1 +#       undef STACK_GRAN +#       define STACK_GRAN 0x10000000 +	/* STACKBOTTOM is usually 0xc0000000, but this changes with	*/ +	/* different kernel configurations.  In particular, systems	*/ +	/* with 2GB physical memory will usually move the user		*/ +	/* address space limit, and hence initial SP to 0x80000000.	*/ +#       if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC) +#	    define MPROTECT_VDB +#	else +	    /* We seem to get random errors in incremental mode,	*/ +	    /* possibly because Linux threads is itself a malloc client */ +	    /* and can't deal with the signals.				*/ +#	endif +#       ifdef __ELF__ +#            define DYNAMIC_LOADING +#	     ifdef UNDEFINED	/* includes ro data */ +	       extern int _etext; +#              define DATASTART ((ptr_t)((((word) (&_etext)) + 0xfff) & ~0xfff)) +#	     endif +#	     include <features.h> +#	     if defined(__GLIBC__) && __GLIBC__ >= 2 +		 extern int __data_start; +#		 define DATASTART ((ptr_t)(&__data_start)) +#	     else +     	         extern char **__environ; +#                define DATASTART ((ptr_t)(&__environ)) +			      /* hideous kludge: __environ is the first */ +			      /* word in crt0.o, and delimits the start */ +			      /* of the data segment, no matter which   */ +			      /* ld options were passed through.        */ +			      /* We could use _etext instead, but that  */ +			      /* would include .rodata, which may       */ +			      /* contain large read-only data tables    */ +			      /* that we'd rather not scan.		
*/ +#	     endif +	     extern int _end; +#	     define DATAEND (&_end) +#	else +	     extern int etext; +#            define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff)) +#       endif +#   endif +#   ifdef CYGWIN32 +#       define OS_TYPE "CYGWIN32" +          extern int _data_start__; +          extern int _data_end__; +          extern int _bss_start__; +          extern int _bss_end__; +  	/* For binutils 2.9.1, we have			*/ +  	/*	DATASTART   = _data_start__		*/ +  	/*	DATAEND	    = _bss_end__		*/ +  	/* whereas for some earlier versions it was	*/ +  	/*	DATASTART   = _bss_start__		*/ +  	/*	DATAEND	    = _data_end__		*/ +  	/* To get it right for both, we take the	*/ +  	/* minimum/maximum of the two.			*/ +#   	define MAX(x,y) ((x) > (y) ? (x) : (y)) +#   	define MIN(x,y) ((x) < (y) ? (x) : (y)) +#       define DATASTART ((ptr_t) MIN(&_data_start__, &_bss_start__)) +#       define DATAEND	 ((ptr_t) MAX(&_data_end__, &_bss_end__)) +#	undef STACK_GRAN +#       define STACK_GRAN 0x10000 +#       define HEURISTIC1 +#   endif +#   ifdef OS2 +#	define OS_TYPE "OS2" + 	    	/* STACKBOTTOM and DATASTART are handled specially in 	*/ +		/* os_dep.c. OS2 actually has the right			*/ +		/* system call!						*/ +#	define DATAEND	/* not needed */ +#   endif +#   ifdef MSWIN32 +#	define OS_TYPE "MSWIN32" +		/* STACKBOTTOM and DATASTART are handled specially in 	*/ +		/* os_dep.c.						*/ +#       ifndef __WATCOMC__ +#	  define MPROTECT_VDB +#	endif +#       define DATAEND  /* not needed */ +#   endif +#   ifdef DJGPP +#       define OS_TYPE "DJGPP" +#       include "stubinfo.h" +        extern int etext; +        extern int _stklen; +        extern int __djgpp_stack_limit; +#       define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ff) & ~0x1ff)) +/* #       define STACKBOTTOM ((ptr_t)((word) _stubinfo + _stubinfo->size \ +                                                     + _stklen)) */ +#       define STACKBOTTOM ((ptr_t)((word) __djgpp_stack_limit + _stklen)) +		/* This may not be right.  */ +#   endif +#   ifdef OPENBSD +#	define OS_TYPE "OPENBSD" +#   endif +#   ifdef FREEBSD +#	define OS_TYPE "FREEBSD" +#	define MPROTECT_VDB +#   endif +#   ifdef NETBSD +#	define OS_TYPE "NETBSD" +#   endif +#   ifdef THREE86BSD +#	define OS_TYPE "THREE86BSD" +#   endif +#   ifdef BSDI +#	define OS_TYPE "BSDI" +#   endif +#   if defined(OPENBSD) || defined(FREEBSD) || defined(NETBSD) \ +        || defined(THREE86BSD) || defined(BSDI) +#	define HEURISTIC2 +	extern char etext; +#	define DATASTART ((ptr_t)(&etext)) +#   endif +#   ifdef NEXT +#	define OS_TYPE "NEXT" +#	define DATASTART ((ptr_t) get_etext()) +#	define STACKBOTTOM ((ptr_t)0xc0000000) +#	define DATAEND	/* not needed */ +#   endif +#   ifdef DOS4GW +#     define OS_TYPE "DOS4GW" +      extern long __nullarea; +      extern char _end; +      extern char *_STACKTOP; +      /* Depending on calling conventions, Watcom C either does or +         does not prepend an underscore to the names of C variables. +         Make sure startup code variables always have the same names.  */ +      #pragma aux __nullarea "*"; +      #pragma aux _end "*"; +#     define STACKBOTTOM ((ptr_t) _STACKTOP) +                         /* confused? me too. 
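+                            (Editorial gloss: _STACKTOP is supplied by
+                            the Watcom startup code, and the "*"
+                            pragmas above tell Watcom to emit
+                            __nullarea and _end verbatim, with no
+                            underscore prepended.)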
*/ +#     define DATASTART ((ptr_t) &__nullarea) +#     define DATAEND ((ptr_t) &_end) +#   endif +# endif + +# ifdef NS32K +#   define MACH_TYPE "NS32K" +#   define ALIGNMENT 4 +    extern char **environ; +#   define DATASTART ((ptr_t)(&environ)) +			      /* hideous kludge: environ is the first   */ +			      /* word in crt0.o, and delimits the start */ +			      /* of the data segment, no matter which   */ +			      /* ld options were passed through.        */ +#   define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */ +# endif + +# ifdef MIPS +#   define MACH_TYPE "MIPS" +#   ifndef IRIX5 +#     define DATASTART (ptr_t)0x10000000 +			      /* Could probably be slightly higher since */ +			      /* startup code allocates lots of stuff.   */ +#   else +      extern int _fdata; +#     define DATASTART ((ptr_t)(&_fdata)) +#     ifdef USE_MMAP +#         define HEAP_START (ptr_t)0x30000000 +#     else +#	  define HEAP_START DATASTART +#     endif +			      /* Lowest plausible heap address.		*/ +			      /* In the MMAP case, we map there.	*/ +			      /* In either case it is used to identify	*/ +			      /* heap sections so they're not 		*/ +			      /* considered as roots.			*/ +#   endif /* IRIX5 */ +#   define HEURISTIC2 +/* #   define STACKBOTTOM ((ptr_t)0x7fff8000)  sometimes also works.  */ +#   ifdef ULTRIX +#	define OS_TYPE "ULTRIX" +#       define ALIGNMENT 4 +#   endif +#   ifdef RISCOS +#	define OS_TYPE "RISCOS" +#   	define ALIGNMENT 4  /* Required by hardware */ +#   endif +#   ifdef IRIX5 +#	define OS_TYPE "IRIX5" +#       define MPROTECT_VDB +#       ifdef _MIPS_SZPTR +#	  define CPP_WORDSZ _MIPS_SZPTR +#	  define ALIGNMENT (_MIPS_SZPTR/8) +#	  if CPP_WORDSZ != 64 +#	    define ALIGN_DOUBLE +#	  endif +#	else +#         define ALIGNMENT 4 +#	  define ALIGN_DOUBLE +#	endif +#	define DYNAMIC_LOADING +#   endif +# endif + +# ifdef RS6000 +#   define MACH_TYPE "RS6000" +#   define ALIGNMENT 4 +#   define DATASTART ((ptr_t)0x20000000) +    extern int errno; +#   define STACKBOTTOM ((ptr_t)((ulong)&errno)) +#   define DYNAMIC_LOADING +	/* For really old versions of AIX, this may have to be removed. */ +# endif + +# ifdef HP_PA +#   define MACH_TYPE "HP_PA" +#   define ALIGNMENT 4 +#   define ALIGN_DOUBLE +    extern int __data_start; +#   define DATASTART ((ptr_t)(&__data_start)) +#   if 0 +	/* The following appears to work for 7xx systems running HP/UX	*/ +	/* 9.xx.  Furthermore, it might result in much faster		*/ +	/* collections than HEURISTIC2, which may involve scanning	*/ +	/* segments that directly precede the stack.  It is not the	*/ +	/* default, since it may not work on older machine/OS		*/ +	/* combinations. (Thanks to Raymond X.T. Nijssen for uncovering	*/ +	/* this.)							*/ +#       define STACKBOTTOM ((ptr_t) 0x7b033000)  /* from /etc/conf/h/param.h */ +#   else +#       define HEURISTIC2 +#   endif +#   define STACK_GROWS_UP +#   define DYNAMIC_LOADING +#   include <unistd.h> +#   define GETPAGESIZE() sysconf(_SC_PAGE_SIZE) +	/* They misspelled the Posix macro?	*/ +# endif + +# ifdef ALPHA +#   define MACH_TYPE "ALPHA" +#   define ALIGNMENT 8 +#   ifdef OSF1 +#	define OS_TYPE "OSF1" +#   	define DATASTART ((ptr_t) 0x140000000) +	extern _end; +#   	define DATAEND ((ptr_t) &_end) +#   	define HEURISTIC2 +	/* Normally HEURISTIC2 is too conservative, since		*/ +	/* the text segment immediately follows the stack.		*/ +	/* Hence we give an upper bound.				
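+	   (Editorial note: concretely, HEURISTIC2_LIMIT just below
+	   caps the probe at &__start rounded down to a page boundary,
+	   so the search stops before reaching the text segment.)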
*/ +    	extern int __start; +#   	define HEURISTIC2_LIMIT ((ptr_t)((word)(&__start) & ~(getpagesize()-1))) +#   	define CPP_WORDSZ 64 +#   	define MPROTECT_VDB +#   	define DYNAMIC_LOADING +#   endif +#   ifdef LINUX +#       define OS_TYPE "LINUX" +#       define CPP_WORDSZ 64 +#       define STACKBOTTOM ((ptr_t) 0x120000000) +#       ifdef __ELF__ +#   	  if 0 +	    /* __data_start apparently disappeared in some recent releases. */ +            extern int __data_start; +#           define DATASTART &__data_start +#	  endif +#         define DATASTART GC_data_start +#         define DYNAMIC_LOADING +#       else +#           define DATASTART ((ptr_t) 0x140000000) +#       endif +	extern int _end; +#	define DATAEND (&_end) +#	define MPROTECT_VDB +		/* Has only been superficially tested.  May not	*/ +		/* work on all versions.			*/ +#   endif +# endif + +# ifdef M88K +#   define MACH_TYPE "M88K" +#   define ALIGNMENT 4 +#   define ALIGN_DOUBLE +    extern int etext; +#   ifdef CX_UX +#	define OS_TYPE "CX_UX" +#       define DATASTART ((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000) +#   endif +#   ifdef  DGUX +#	define OS_TYPE "DGUX" +	extern char * GC_SysVGetDataStart(); +#       define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext) +#   endif +#   define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */ +# endif + +# ifdef S370 +#   define MACH_TYPE "S370" +#   define OS_TYPE "UTS4" +#   define ALIGNMENT 4	/* Required by hardware	*/ +    extern int etext; +	extern int _etext; +	extern int _end; +	extern char * GC_SysVGetDataStart(); +#       define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext) +#	define DATAEND (&_end) +#	define HEURISTIC2 +# endif + +# ifndef STACK_GROWS_UP +#   define STACK_GROWS_DOWN +# endif + +# ifndef CPP_WORDSZ +#   define CPP_WORDSZ 32 +# endif + +# ifndef OS_TYPE +#   define OS_TYPE "" +# endif + +# ifndef DATAEND +    extern int end; +#   define DATAEND (&end) +# endif + +# if defined(SVR4) && !defined(GETPAGESIZE) +#    include <unistd.h> +#    define GETPAGESIZE()  sysconf(_SC_PAGESIZE) +# endif + +# ifndef GETPAGESIZE +#   if defined(SUNOS5) || defined(IRIX5) +#	include <unistd.h> +#   endif +#   define GETPAGESIZE() getpagesize() +# endif + +# if defined(SUNOS5) || defined(DRSNX) || defined(UTS4) +    /* OS has SVR4 generic features.  Probably others also qualify.	*/ +#   define SVR4 +# endif + +# if defined(SUNOS5) || defined(DRSNX) +    /* OS has SUNOS5 style semi-undocumented interface to dynamic 	*/ +    /* loader.								*/ +#   define SUNOS5DL +    /* OS has SUNOS5 style signal handlers.				*/ +#   define SUNOS5SIGS +# endif + +# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64 +   -> bad word size +# endif + +# ifdef PCR +#   undef DYNAMIC_LOADING +#   undef STACKBOTTOM +#   undef HEURISTIC1 +#   undef HEURISTIC2 +#   undef PROC_VDB +#   undef MPROTECT_VDB +#   define PCR_VDB +# endif + +# ifdef SRC_M3 +/* Postponed for now. */ +#   undef PROC_VDB +#   undef MPROTECT_VDB +# endif + +# ifdef SMALL_CONFIG +/* Presumably not worth the space it takes. */ +#   undef PROC_VDB +#   undef MPROTECT_VDB +# endif + +# ifdef USE_MUNMAP +#   undef MPROTECT_VDB  /* Can't deal with address space holes. 
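+			   (Editorial note: unmapped heap sections would
+			   take spurious faults under mprotect-based
+			   dirty tracking; when no other VDB scheme
+			   survives, DEFAULT_VDB below simply treats
+			   every page as dirty.)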
*/ +# endif + +# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB) +#   define DEFAULT_VDB +# endif + +# if defined(_SOLARIS_PTHREADS) && !defined(SOLARIS_THREADS) +#   define SOLARIS_THREADS +# endif +# if defined(IRIX_THREADS) && !defined(IRIX5) +--> inconsistent configuration +# endif +# if defined(IRIX_JDK_THREADS) && !defined(IRIX5) +--> inconsistent configuration +# endif +# if defined(LINUX_THREADS) && !defined(LINUX) +--> inconsistent configuration +# endif +# if defined(SOLARIS_THREADS) && !defined(SUNOS5) +--> inconsistent configuration +# endif +# if defined(PCR) || defined(SRC_M3) || \ +	defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \ +	defined(IRIX_THREADS) || defined(LINUX_THREADS) || \ +	defined(IRIX_JDK_THREADS) +#   define THREADS +# endif + +# if defined(HP_PA) || defined(M88K) || defined(POWERPC) \ +     || (defined(I386) && defined(OS2)) || defined(UTS4) || defined(LINT) +	/* Use setjmp based hack to mark from callee-save registers. */ +#	define USE_GENERIC_PUSH_REGS +# endif +# if defined(SPARC) && !defined(LINUX) +#   define SAVE_CALL_CHAIN +#   define ASM_CLEAR_CODE	/* Stack clearing is crucial, and we 	*/ +				/* include assembly code to do it well.	*/ +# endif + +# endif diff --git a/gc/include/weakpointer.h b/gc/include/weakpointer.h new file mode 100644 index 0000000..84906b0 --- /dev/null +++ b/gc/include/weakpointer.h @@ -0,0 +1,221 @@ +#ifndef	_weakpointer_h_ +#define	_weakpointer_h_ + +/**************************************************************************** + +WeakPointer and CleanUp + +    Copyright (c) 1991 by Xerox Corporation.  All rights reserved. + +    THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED +    OR IMPLIED.  ANY USE IS AT YOUR OWN RISK. + +    Permission is hereby granted to copy this code for any purpose, +    provided the above notices are retained on all copies. + +    Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis + +****************************************************************************/ + +/**************************************************************************** + +WeakPointer + +A weak pointer is a pointer to a heap-allocated object that doesn't +prevent the object from being garbage collected. Weak pointers can be +used to track which objects haven't yet been reclaimed by the +collector. A weak pointer is deactivated when the collector discovers +its referent object is unreachable by normal pointers (reachability +and deactivation are defined more precisely below). A deactivated weak +pointer remains deactivated forever. + +****************************************************************************/ + + +template< class T > class WeakPointer { +public: + +WeakPointer( T* t = 0 ) +    /* Constructs a weak pointer for *t. t may be null. It is an error +       if t is non-null and *t is not a collected object. */ +    {impl = _WeakPointer_New( t );} + +T* Pointer() +    /* wp.Pointer() returns a pointer to the referent object of wp or +       null if wp has been deactivated (because its referent object +       has been discovered unreachable by the collector). */ +    {return (T*) _WeakPointer_Pointer( this->impl );} + +int operator==( WeakPointer< T > wp2 ) +    /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and +       wp2 refer to the same object. If wp1 != wp2, then either wp1 +       and wp2 don't refer to the same object, or if they do, one or +       both of them has been deactivated. 
(Note: If objects t1 and t2 +       are never made reachable by their clean-up functions, then +       WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only if t1 == t2.) */ +    {return _WeakPointer_Equal( this->impl, wp2.impl );} + +int Hash() +    /* Returns a hash code suitable for use by multiplicative- and +       division-based hash tables. If wp1 == wp2, then wp1.Hash() == +       wp2.Hash(). */ +    {return _WeakPointer_Hash( this->impl );} + +private: +void* impl; +}; + +/***************************************************************************** + +CleanUp + +A garbage-collected object can have an associated clean-up function +that will be invoked some time after the collector discovers the +object is unreachable via normal pointers. Clean-up functions can be +used to release resources such as open-file handles or window handles +when their containing objects become unreachable.  If a C++ object has +a non-empty explicit destructor (i.e. it contains programmer-written +code), the destructor will be automatically registered as the object's +initial clean-up function. + +There is no guarantee that the collector will detect every unreachable +object (though it will find almost all of them). Clients should not +rely on clean-up to cause some action to occur immediately -- clean-up +is only a mechanism for improving resource usage. + +Every object with a clean-up function also has a clean-up queue. When +the collector finds the object is unreachable, it enqueues it on its +queue. The clean-up function is applied when the object is removed +from the queue. By default, objects are enqueued on the garbage +collector's queue, and the collector removes all objects from its +queue after each collection. If a client supplies another queue for +objects, it is the client's responsibility to remove objects (and +cause their functions to be called) by polling it periodically. + +Clean-up queues allow clean-up functions accessing global data to +synchronize with the main program. Garbage collection can occur at any +time, and clean-ups invoked by the collector might access data in an +inconsistent state. A client can control this by defining an explicit +queue for objects and polling it at safe points. + +The following definitions are used by the specification below: + +Given a pointer t to a collected object, the base object BO(t) is the +value returned by new when it created the object. (Because of multiple +inheritance, t and BO(t) may not be the same address.) + +A weak pointer wp references an object *t if BO(wp.Pointer()) == +BO(t). + +***************************************************************************/ + +template< class T, class Data > class CleanUp { +public: + +static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 ) +    /* Sets the clean-up function of object BO(t) to be <c, d>, +       replacing any previously defined clean-up function for BO(t); c +       and d can be null, but t cannot. Sets the clean-up queue for +       BO(t) to be the collector's queue. When t is removed from its +       clean-up queue, its clean-up will be applied by calling c(d, +       t). It is an error if *t is not a collected object. */  +       {_CleanUp_Set( t, c, d );} + +static void Call( T* t ) +    /* Sets the new clean-up function for BO(t) to be null and, if the +       old one is non-null, calls it immediately, even if BO(t) is +       still reachable. Deactivates any weak pointers to BO(t). 
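+       (Editorial sketch, with hypothetical names: a File class with
+       an explicit Close() method might invoke
+           CleanUp< File, Descriptor >::Call( this );
+       there, running the registered clean-up early instead of
+       waiting for the collector.)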
*/ +       {_CleanUp_Call( t );} + +class Queue {public: +    Queue() +        /* Constructs a new queue. */ +            {this->head = _CleanUp_Queue_NewHead();} + +    void Set( T* t ) +        /* q.Set(t) sets the clean-up queue of BO(t) to be q. */ +            {_CleanUp_Queue_Set( this->head, t );} + +    int Call() +        /* If q is non-empty, q.Call() removes the first object and +           calls its clean-up function; does nothing if q is +           empty. Returns true if there are more objects in the +           queue. */ +           {return _CleanUp_Queue_Call( this->head );} + +    private: +    void* head; +    }; +}; + +/********************************************************************** + +Reachability and Clean-up + +An object O is reachable if it can be reached via a non-empty path of +normal pointers from the registers, stacks, global variables, or an +object with a non-null clean-up function (including O itself), +ignoring pointers from an object to itself. + +This definition of reachability ensures that if object B is accessible +from object A (and not vice versa) and if both A and B have clean-up +functions, then A will always be cleaned up before B. Note that as +long as an object with a clean-up function is contained in a cycle of +pointers, it will always be reachable and will never be cleaned up or +collected. + +When the collector finds an unreachable object with a null clean-up +function, it atomically deactivates all weak pointers referencing the +object and recycles its storage. If object B is accessible from object +A via a path of normal pointers, A will be discovered unreachable no +later than B, and a weak pointer to A will be deactivated no later +than a weak pointer to B. + +When the collector finds an unreachable object with a non-null +clean-up function, the collector atomically deactivates all weak +pointers referencing the object, redefines its clean-up function to be +null, and enqueues it on its clean-up queue. The object then becomes +reachable again and remains reachable at least until its clean-up +function executes. + +The clean-up function is assured that its argument is the only +accessible pointer to the object. Nothing prevents the function from +redefining the object's clean-up function or making the object +reachable again (for example, by storing the pointer in a global +variable). + +If the clean-up function does not make its object reachable again and +does not redefine its clean-up function, then the object will be +collected by a subsequent collection (because the object remains +unreachable and now has a null clean-up function). If the clean-up +function does make its object reachable again and a clean-up function +is subsequently redefined for the object, then the new clean-up +function will be invoked the next time the collector finds the object +unreachable. + +Note that a destructor for a collected object cannot safely redefine a +clean-up function for its object, since after the destructor executes, +the object has been destroyed into "raw memory". (In most +implementations, destroying an object mutates its vtbl.) + +Finally, note that calling delete t on a collected object first +deactivates any weak pointers to t and then invokes its clean-up +function (destructor). 
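+
+Editorial usage sketch (all names hypothetical; assumes Buffer is a
+collected class, e.g. one derived from class gc in gc_cpp.h, and uses
+only the operations documented above):
+
+    void closeIt( FILE* f, Buffer* b ) { fclose( f ); }
+
+    FILE* log = fopen( "buffer.log", "w" );
+    Buffer* b = new Buffer;
+    WeakPointer< Buffer > wb( b );
+    CleanUp< Buffer, FILE >::Queue q;
+    CleanUp< Buffer, FILE >::Set( b, closeIt, log );
+    q.Set( b );
+
+Later, polled periodically at a safe point, after the last strong use
+of b:
+
+    while (q.Call()) { }        (runs closeIt( log, b ) once the
+                                 collector has enqueued b)
+    if (wb.Pointer() == 0) ...  (true once b has been found
+                                 unreachable and enqueued)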
+ +**********************************************************************/ + +extern "C" { +    void* _WeakPointer_New( void* t ); +    void* _WeakPointer_Pointer( void* wp ); +    int _WeakPointer_Equal( void* wp1, void* wp2 ); +    int _WeakPointer_Hash( void* wp ); +    void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d ); +    void _CleanUp_Call( void* t ); +    void* _CleanUp_Queue_NewHead (); +    void _CleanUp_Queue_Set( void* h, void* t ); +    int _CleanUp_Queue_Call( void* h ); +} + +#endif /* _weakpointer_h_ */
