Ruby 1.9.3p448 (2013-06-27 revision 41675)
gc.c
/**********************************************************************

  gc.c -

  $Author: usa $
  created at: Tue Oct  5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby/re.h"
#include "ruby/io.h"
#include "ruby/util.h"
#include "eval_intern.h"
#include "vm_core.h"
#include "internal.h"
#include "gc.h"
#include "constant.h"
#include "ruby_atomic.h"
#include <stdio.h>
#include <setjmp.h>
#include <sys/types.h>
#include <assert.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
#endif

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

/* Make alloca work the best possible way.  */
#ifdef __GNUC__
# ifndef atarist
#  ifndef alloca
#   define alloca __builtin_alloca
#  endif
# endif /* atarist */
#else
# ifdef HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
 #pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca ();
#   endif
#  endif /* AIX */
# endif /* HAVE_ALLOCA_H */
#endif /* __GNUC__ */

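/*
 * Default GC tuning knobs: GC_MALLOC_LIMIT is the number of bytes that
 * may be allocated through the GC-aware allocators before a collection
 * is forced, HEAP_MIN_SLOTS is the initial number of object slots, and
 * FREE_MIN is the number of free slots the collector aims to keep
 * available after a sweep.  All three can be overridden at startup via
 * the environment variables read in rb_gc_set_params() below.
 */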
#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif
#define HEAP_MIN_SLOTS 10000
#define FREE_MIN  4096

typedef struct {
    unsigned int initial_malloc_limit;
    unsigned int initial_heap_min_slots;
    unsigned int initial_free_min;
    int gc_stress;
} ruby_gc_params_t;

ruby_gc_params_t initial_params = {
    GC_MALLOC_LIMIT,
    HEAP_MIN_SLOTS,
    FREE_MIN,
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    FALSE,
#endif
};

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

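/*
 * Object IDs for non-immediate objects are derived from the object's
 * address.  When long is as wide as a pointer, the address (always a
 * multiple of sizeof(RVALUE), so its low bit is clear) is tagged with
 * FIXNUM_FLAG; on LLP64 platforms the address is halved instead so it
 * fits into a Fixnum whenever possible.
 */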
#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif

int ruby_gc_debug_indent = 0;

/* for GC profile */
#define GC_PROFILE_MORE_DETAIL 0
typedef struct gc_profile_record {
    double gc_time;
    double gc_mark_time;
    double gc_sweep_time;
    double gc_invoke_time;

    size_t heap_use_slots;
    size_t heap_live_objects;
    size_t heap_free_objects;
    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;

    int have_finalize;
    int is_marked;

    size_t allocate_increase;
    size_t allocate_limit;
} gc_profile_record;

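/* Return the user CPU time consumed by this process, in seconds, via
 * getrusage() where available and GetProcessTimes() on Windows; used to
 * timestamp GC profile records. */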
static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
    struct rusage usage;
    struct timeval time;
    getrusage(RUSAGE_SELF, &usage);
    time = usage.ru_utime;
    return time.tv_sec + time.tv_usec * 1e-6;
#elif defined _WIN32
    FILETIME creation_time, exit_time, kernel_time, user_time;
    ULARGE_INTEGER ui;
    LONG_LONG q;
    double t;

    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
        return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
    q /= 1000000L;
#ifdef __GNUC__
    t += q;
#else
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
#endif
    return t;
#else
    return 0.0;
#endif
}

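/*
 * GC profiling hooks.  The detailed variants below record per-phase
 * mark and sweep times plus malloc statistics, but compile to nothing
 * unless GC_PROFILE_MORE_DETAIL is set; the basic timer macros only do
 * work while GC::Profiler is enabled (objspace->profile.run).
 */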
#define GC_PROF_TIMER_START do {\
        if (objspace->profile.run) {\
            if (!objspace->profile.record) {\
                objspace->profile.size = 1000;\
                objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (count >= objspace->profile.size) {\
                objspace->profile.size += 1000;\
                objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (!objspace->profile.record) {\
                rb_bug("gc_profile malloc or realloc miss");\
            }\
            MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
            gc_time = getrusage_time();\
            objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
        }\
    } while(0)

#define GC_PROF_TIMER_STOP(marked) do {\
        if (objspace->profile.run) {\
            gc_time = getrusage_time() - gc_time;\
            if (gc_time < 0) gc_time = 0;\
            objspace->profile.record[count].gc_time = gc_time;\
            objspace->profile.record[count].is_marked = !!(marked);\
            GC_PROF_SET_HEAP_INFO(objspace->profile.record[count]);\
            objspace->profile.count++;\
        }\
    } while(0)

#if GC_PROFILE_MORE_DETAIL
#define INIT_GC_PROF_PARAMS double gc_time = 0, sweep_time = 0;\
    size_t count = objspace->profile.count, total = 0, live = 0

#define GC_PROF_MARK_TIMER_START double mark_time = 0;\
    do {\
        if (objspace->profile.run) {\
            mark_time = getrusage_time();\
        }\
    } while(0)

#define GC_PROF_MARK_TIMER_STOP do {\
        if (objspace->profile.run) {\
            mark_time = getrusage_time() - mark_time;\
            if (mark_time < 0) mark_time = 0;\
            objspace->profile.record[objspace->profile.count].gc_mark_time = mark_time;\
        }\
    } while(0)

#define GC_PROF_SWEEP_TIMER_START do {\
        if (objspace->profile.run) {\
            sweep_time = getrusage_time();\
        }\
    } while(0)

#define GC_PROF_SWEEP_TIMER_STOP do {\
        if (objspace->profile.run) {\
            sweep_time = getrusage_time() - sweep_time;\
            if (sweep_time < 0) sweep_time = 0;\
            objspace->profile.record[count].gc_sweep_time = sweep_time;\
        }\
    } while(0)
#define GC_PROF_SET_MALLOC_INFO do {\
        if (objspace->profile.run) {\
            gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
            record->allocate_increase = malloc_increase;\
            record->allocate_limit = malloc_limit; \
        }\
    } while(0)
#define GC_PROF_SET_HEAP_INFO(record) do {\
        live = objspace->heap.live_num;\
        total = heaps_used * HEAP_OBJ_LIMIT;\
        (record).heap_use_slots = heaps_used;\
        (record).heap_live_objects = live;\
        (record).heap_free_objects = total - live;\
        (record).heap_total_objects = total;\
        (record).have_finalize = deferred_final_list ? Qtrue : Qfalse;\
        (record).heap_use_size = live * sizeof(RVALUE);\
        (record).heap_total_size = total * sizeof(RVALUE);\
    } while(0)
#define GC_PROF_INC_LIVE_NUM objspace->heap.live_num++
#define GC_PROF_DEC_LIVE_NUM objspace->heap.live_num--
#else
#define INIT_GC_PROF_PARAMS double gc_time = 0;\
    size_t count = objspace->profile.count, total = 0, live = 0
#define GC_PROF_MARK_TIMER_START
#define GC_PROF_MARK_TIMER_STOP
#define GC_PROF_SWEEP_TIMER_START
#define GC_PROF_SWEEP_TIMER_STOP
#define GC_PROF_SET_MALLOC_INFO
#define GC_PROF_SET_HEAP_INFO(record) do {\
        live = objspace->heap.live_num;\
        total = heaps_used * HEAP_OBJ_LIMIT;\
        (record).heap_total_objects = total;\
        (record).heap_use_size = live * sizeof(RVALUE);\
        (record).heap_total_size = total * sizeof(RVALUE);\
    } while(0)
#define GC_PROF_INC_LIVE_NUM
#define GC_PROF_DEC_LIVE_NUM
#endif


#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif

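/*
 * Every heap-allocated Ruby object occupies one RVALUE: a union large
 * enough to hold any built-in object representation.  A freed slot
 * reuses the same memory as a node in the freelist (as.free), with
 * flags == 0 identifying it as a free cell.
 */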
typedef struct RVALUE {
    union {
        struct {
            VALUE flags;                /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData   typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RNode   node;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    const char *file;
    int   line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

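/* A heaps_slot describes one malloc'ed heap block: membase is the raw
 * allocation, slot the first aligned RVALUE within it, and limit the
 * number of object slots it holds.  Blocks are kept both in a doubly
 * linked list (next/prev) and in the address-sorted array below. */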
struct heaps_slot {
    void *membase;
    RVALUE *slot;
    size_t limit;
    struct heaps_slot *next;
    struct heaps_slot *prev;
};

struct sorted_heaps_slot {
    RVALUE *start;
    RVALUE *end;
    struct heaps_slot *slot;
};

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

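/* The mark stack is a chain of fixed-size chunks, so marking deeply
 * nested object graphs cannot overflow the machine stack; spent chunks
 * are kept in a small cache for reuse. */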
#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;
    size_t index;
    size_t limit;
    size_t cache_size;
    size_t unused_cache_size;
} mark_stack_t;

#define CALC_EXACT_MALLOC_SIZE 0

typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
    } malloc_params;
    struct {
        size_t increment;
        struct heaps_slot *ptr;
        struct heaps_slot *sweep_slots;
        struct sorted_heaps_slot *sorted;
        size_t length;
        size_t used;
        RVALUE *freelist;
        RVALUE *range[2];
        RVALUE *freed;
        size_t live_num;
        size_t free_num;
        size_t free_min;
        size_t final_num;
        size_t do_heap_free;
    } heap;
    struct {
        int dont_gc;
        int dont_lazy_sweep;
        int during_gc;
    } flags;
    struct {
        st_table *table;
        RVALUE *deferred;
    } final;
    mark_stack_t mark_stack;
    struct {
        int run;
        gc_profile_record *record;
        size_t count;
        size_t size;
        double invoke_time;
    } profile;
    struct gc_list *global_list;
    size_t count;
    int gc_stress;
} rb_objspace_t;

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define rb_objspace (*GET_VM()->objspace)
#define ruby_initial_gc_stress  initial_params.gc_stress
int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#else
static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#endif
#define malloc_limit            objspace->malloc_params.limit
#define malloc_increase         objspace->malloc_params.increase
#define heaps                   objspace->heap.ptr
#define heaps_length            objspace->heap.length
#define heaps_used              objspace->heap.used
#define freelist                objspace->heap.freelist
#define lomem                   objspace->heap.range[0]
#define himem                   objspace->heap.range[1]
#define heaps_inc               objspace->heap.increment
#define heaps_freed             objspace->heap.freed
#define dont_gc                 objspace->flags.dont_gc
#define during_gc               objspace->flags.during_gc
#define finalizer_table         objspace->final.table
#define deferred_final_list     objspace->final.deferred
#define global_List             objspace->global_list
#define ruby_gc_stress          objspace->gc_stress
#define initial_malloc_limit    initial_params.initial_malloc_limit
#define initial_heap_min_slots  initial_params.initial_heap_min_slots
#define initial_free_min        initial_params.initial_free_min

static void rb_objspace_call_finalizer(rb_objspace_t *objspace);

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
rb_objspace_t *
rb_objspace_alloc(void)
{
    rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
    memset(objspace, 0, sizeof(*objspace));
    malloc_limit = initial_malloc_limit;
    ruby_gc_stress = ruby_initial_gc_stress;

    return objspace;
}
#endif

static void initial_expand_heap(rb_objspace_t *objspace);
static void init_mark_stack(mark_stack_t *stack);

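/* Read GC tuning overrides from the environment.  Example (shell):
 *
 *   RUBY_GC_MALLOC_LIMIT=16000000 RUBY_HEAP_MIN_SLOTS=500000 ruby app.rb
 *
 * Values that do not parse to a positive integer are ignored and the
 * compiled-in defaults kept. */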
void
rb_gc_set_params(void)
{
    char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;

    if (rb_safe_level() > 0) return;

    malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
    if (malloc_limit_ptr != NULL) {
        int malloc_limit_i = atoi(malloc_limit_ptr);
        if (RTEST(ruby_verbose))
            fprintf(stderr, "malloc_limit=%d (%d)\n",
                    malloc_limit_i, initial_malloc_limit);
        if (malloc_limit_i > 0) {
            initial_malloc_limit = malloc_limit_i;
        }
    }

    heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
    if (heap_min_slots_ptr != NULL) {
        int heap_min_slots_i = atoi(heap_min_slots_ptr);
        if (RTEST(ruby_verbose))
            fprintf(stderr, "heap_min_slots=%d (%d)\n",
                    heap_min_slots_i, initial_heap_min_slots);
        if (heap_min_slots_i > 0) {
            initial_heap_min_slots = heap_min_slots_i;
            initial_expand_heap(&rb_objspace);
        }
    }

    free_min_ptr = getenv("RUBY_FREE_MIN");
    if (free_min_ptr != NULL) {
        int free_min_i = atoi(free_min_ptr);
        if (RTEST(ruby_verbose))
            fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
        if (free_min_i > 0) {
            initial_free_min = free_min_i;
        }
    }
}

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
static void gc_sweep(rb_objspace_t *);
static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
static void rest_sweep(rb_objspace_t *);
static void free_stack_chunks(mark_stack_t *);

void
rb_objspace_free(rb_objspace_t *objspace)
{
    rest_sweep(objspace);
    if (objspace->profile.record) {
        free(objspace->profile.record);
        objspace->profile.record = 0;
    }
    if (global_List) {
        struct gc_list *list, *next;
        for (list = global_List; list; list = next) {
            next = list->next;
            free(list);
        }
    }
    if (objspace->heap.sorted) {
        size_t i;
        for (i = 0; i < heaps_used; ++i) {
            free(objspace->heap.sorted[i].slot->membase);
            free(objspace->heap.sorted[i].slot);
        }
        free(objspace->heap.sorted);
        heaps_used = 0;
        heaps = 0;
    }
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}
#endif

/* tiny heap size */
/* 32KB */
/*#define HEAP_SIZE 0x8000 */
/* 128KB */
/*#define HEAP_SIZE 0x20000 */
/* 64KB */
/*#define HEAP_SIZE 0x10000 */
/* 16KB */
#define HEAP_SIZE 0x4000
/* 8KB */
/*#define HEAP_SIZE 0x2000 */
/* 4KB */
/*#define HEAP_SIZE 0x1000 */
/* 2KB */
/*#define HEAP_SIZE 0x800 */

#define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))

extern st_table *rb_class_tbl;

int ruby_disable_gc_stress = 0;

static void run_final(rb_objspace_t *objspace, VALUE obj);
static int garbage_collect(rb_objspace_t *objspace);
static int gc_lazy_sweep(rb_objspace_t *objspace);

void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}

void
rb_memerror(void)
{
    rb_thread_t *th = GET_THREAD();
    if (!nomem_error ||
        (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
        rb_thread_raised_clear(th);
        GET_THREAD()->errinfo = nomem_error;
        JUMP_TAG(TAG_RAISE);
    }
    rb_thread_raised_set(th, RAISED_NOMEMORY);
    rb_exc_raise(nomem_error);
}

/*
 *  call-seq:
 *    GC.stress                 -> true or false
 *
 *  Returns the current status of GC stress mode.
 */

static VALUE
gc_stress_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *    GC.stress = bool          -> bool
 *
 *  Updates the GC stress mode.
 *
 *  When stress mode is enabled, the GC is invoked at every GC opportunity:
 *  all memory and object allocations.
 *
 *  Enabling stress mode makes Ruby very slow; it is intended only for
 *  debugging.
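 *
 *     GC.stress = true    # force a GC at every allocation
 *     GC.stress           #=> true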
 */

static VALUE
gc_stress_set(VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    rb_secure(2);
    ruby_gc_stress = RTEST(flag);
    return flag;
}

/*
 *  call-seq:
 *    GC::Profiler.enable?                 -> true or false
 *
 *  The current status of GC profile mode.
 */

static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return objspace->profile.run ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *    GC::Profiler.enable          -> nil
 *
 *  Starts the GC profiler.
 *
 */

static VALUE
gc_profile_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = TRUE;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.disable          -> nil
 *
 *  Stops the GC profiler.
 *
 */

static VALUE
gc_profile_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.clear          -> nil
 *
 *  Clears the GC profiler data.
 *
 */

static VALUE
gc_profile_clear(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
    objspace->profile.count = 0;
    return Qnil;
}

static void *
negative_size_allocation_error_with_gvl(void *ptr)
{
    rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
    return 0; /* should not be reached */
}

static void
negative_size_allocation_error(const char *msg)
{
    if (ruby_thread_has_gvl_p()) {
        rb_raise(rb_eNoMemError, "%s", msg);
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
        }
        else {
            fprintf(stderr, "[FATAL] %s\n", msg);
            exit(EXIT_FAILURE);
        }
    }
}

static void *
gc_with_gvl(void *ptr)
{
    return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace)
{
    if (dont_gc) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace);
    }
    else {
        if (ruby_native_thread_p()) {
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}

static void vm_xfree(rb_objspace_t *objspace, void *ptr);

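/* Common prologue for the GC-aware allocators: reject negative sizes,
 * round a zero-byte request up to one byte, and trigger a collection
 * when the allocation would push malloc_increase past malloc_limit (or
 * whenever GC.stress is on). */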
static inline size_t
vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_with_gvl(objspace);
    }

    return size;
}

static inline void *
vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

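/* Try an allocation; if it fails, run the GC once and retry, raising
 * NoMemoryError when the second attempt also fails. */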
#define TRY_WITH_GC(alloc) do { \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)

static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = vm_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = malloc(size));
    return vm_malloc_fixup(objspace, mem, size);
}

static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        if (garbage_collect_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    free(ptr);
}

void *
ruby_xmalloc(size_t size)
{
    return vm_xmalloc(&rb_objspace, size);
}

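/* Compute n * size for array allocations, raising ArgumentError when
 * the multiplication overflows (detected by dividing the product back
 * by n). */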
static inline size_t
xmalloc2_size(size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
        rb_raise(rb_eArgError, "malloc: possible integer overflow");
    }
    return len;
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
    return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
}

static void *
vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
{
    void *mem;
    size_t size;

    size = xmalloc2_size(count, elsize);
    size = vm_malloc_prepare(objspace, size);

    TRY_WITH_GC(mem = calloc(1, size));
    return vm_malloc_fixup(objspace, mem, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
    return vm_xcalloc(&rb_objspace, n, size);
}

void *
ruby_xrealloc(void *ptr, size_t size)
{
    return vm_xrealloc(&rb_objspace, ptr, size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
        rb_raise(rb_eArgError, "realloc: possible integer overflow");
    }
    return ruby_xrealloc(ptr, len);
}

void
ruby_xfree(void *x)
{
    if (x)
        vm_xfree(&rb_objspace, x);
}


/*
 *  call-seq:
 *     GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 *
 */

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = FALSE;
    return old ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     GC.disable    -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 *
 */

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = TRUE;
    return old ? Qtrue : Qfalse;
}

VALUE rb_mGC;

void
rb_gc_register_mark_object(VALUE obj)
{
    VALUE ary = GET_THREAD()->vm->mark_object_ary;
    rb_ary_push(ary, obj);
}

void
rb_gc_register_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp;

    tmp = ALLOC(struct gc_list);
    tmp->next = global_List;
    tmp->varptr = addr;
    global_List = tmp;
}

void
rb_gc_unregister_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp = global_List;

    if (tmp->varptr == addr) {
        global_List = tmp->next;
        xfree(tmp);
        return;
    }
    while (tmp->next) {
        if (tmp->next->varptr == addr) {
            struct gc_list *t = tmp->next;

            tmp->next = tmp->next->next;
            xfree(t);
            break;
        }
        tmp = tmp->next;
    }
}


static void
allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct sorted_heaps_slot *p;
    size_t size;

    size = next_heaps_length*sizeof(struct sorted_heaps_slot);

    if (heaps_used > 0) {
        p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
        if (p) objspace->heap.sorted = p;
    }
    else {
        p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
    }

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
    heaps_length = next_heaps_length;
}

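/* Allocate one HEAP_SIZE block, align its first slot to sizeof(RVALUE),
 * insert the block into the address-sorted heap array (binary search
 * for the position), and thread all of its fresh slots onto the
 * freelist. */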
static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    struct heaps_slot *slot;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);
    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
    if (slot == 0) {
        xfree(p);
        during_gc = 0;
        rb_memerror();
    }
    MEMZERO((void*)slot, struct heaps_slot, 1);

    slot->next = heaps;
    if (heaps) heaps->prev = slot;
    heaps = slot;

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = objspace->heap.sorted[mid].slot->membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
    }
    objspace->heap.sorted[hi].slot = slot;
    objspace->heap.sorted[hi].start = p;
    objspace->heap.sorted[hi].end = (p + objs);
    heaps->membase = membase;
    heaps->slot = p;
    heaps->limit = objs;
    objspace->heap.free_num += objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}

static void
add_heap_slots(rb_objspace_t *objspace, size_t add)
{
    size_t i;

    if ((heaps_used + add) > heaps_length) {
        allocate_sorted_heaps(objspace, heaps_used + add);
    }

    for (i = 0; i < add; i++) {
        assign_heap_slot(objspace);
    }
    heaps_inc = 0;
}

static void
init_heap(rb_objspace_t *objspace)
{
    add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
    init_mark_stack(&objspace->mark_stack);
#ifdef USE_SIGALTSTACK
    {
        /* altstacks of other threads are allocated elsewhere */
        rb_thread_t *th = GET_THREAD();
        void *tmp = th->altstack;
        th->altstack = malloc(ALT_STACK_SIZE);
        free(tmp); /* free previously allocated area */
    }
#endif

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}

static void
initial_expand_heap(rb_objspace_t *objspace)
{
    size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;

    if (min_size > heaps_used) {
        add_heap_slots(objspace, min_size - heaps_used);
    }
}

static void
set_heaps_increment(rb_objspace_t *objspace)
{
    size_t next_heaps_length = (size_t)(heaps_used * 1.8);

    if (next_heaps_length == heaps_used) {
        next_heaps_length++;
    }

    heaps_inc = next_heaps_length - heaps_used;

    if (next_heaps_length > heaps_length) {
        allocate_sorted_heaps(objspace, next_heaps_length);
    }
}

static int
heaps_increment(rb_objspace_t *objspace)
{
    if (heaps_inc > 0) {
        assign_heap_slot(objspace);
        heaps_inc--;
        return TRUE;
    }
    return FALSE;
}

int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}

#define RANY(o) ((RVALUE*)(o))

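/* Allocate one object slot: run the lazy sweep (or a full GC under
 * GC.stress) when the freelist is empty, then pop the head of the
 * freelist and zero it. */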
VALUE
rb_newobj(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj;

    if (UNLIKELY(during_gc)) {
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
        if (!garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    if (UNLIKELY(!freelist)) {
        if (!gc_lazy_sweep(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif
    GC_PROF_INC_LIVE_NUM;

    return obj;
}

NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = (NODE*)rb_newobj();

    n->flags |= T_NODE;
    nd_set_type(n, type);

    n->u1.value = a0;
    n->u2.value = a1;
    n->u3.value = a2;

    return n;
}

VALUE
rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    NEWOBJ(data, struct RData);
    if (klass) Check_Type(klass, T_CLASS);
    OBJSETUP(data, klass, T_DATA);
    data->data = datap;
    data->dfree = dfree;
    data->dmark = dmark;

    return (VALUE)data;
}

VALUE
rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
{
    NEWOBJ(data, struct RTypedData);

    if (klass) Check_Type(klass, T_CLASS);

    OBJSETUP(data, klass, T_DATA);

    data->data = datap;
    data->typed_flag = 1;
    data->type = type;

    return (VALUE)data;
}

size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
        return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
    }
    else {
        return 0;
    }
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

#ifdef __ia64
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif

#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))

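/* Machine-stack bookkeeping for conservative stack scanning.  When the
 * growth direction is unknown at compile time, it is detected at
 * runtime by comparing a local address here with one in the caller. */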
#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
                        : (size_t)(STACK_END - STACK_START + 1))
#endif
#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif

/* Marking stack */

static void push_mark_stack(mark_stack_t *, VALUE);
static int pop_mark_stack(mark_stack_t *, VALUE *);
static void shrink_stack_chunk_cache(mark_stack_t *stack);

static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *res;

    res = malloc(sizeof(stack_chunk_t));
    if (!res)
        rb_memerror();

    return res;
}

static inline int
is_mark_stack_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}

static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}

static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    stack_chunk_t *chunk;

    if (stack->unused_cache_size > (stack->cache_size/2)) {
        chunk = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        free(chunk);
    }
    stack->unused_cache_size = stack->cache_size;
}

static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    if (stack->cache_size > 0) {
        next = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}

static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *prev;

    prev = stack->chunk->next;
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    stack->index = stack->limit;
}

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
static void
free_stack_chunks(mark_stack_t *stack)
{
    stack_chunk_t *chunk = stack->chunk;
    stack_chunk_t *next = NULL;

    while (chunk != NULL) {
        next = chunk->next;
        free(chunk);
        chunk = next;
    }
}
#endif

static void
push_mark_stack(mark_stack_t *stack, VALUE data)
{
    if (stack->index == stack->limit) {
        push_mark_stack_chunk(stack);
    }
    stack->chunk->data[stack->index++] = data;
}

static int
pop_mark_stack(mark_stack_t *stack, VALUE *data)
{
    if (is_mark_stack_empty(stack)) {
        return FALSE;
    }
    if (stack->index == 1) {
        *data = stack->chunk->data[--stack->index];
        pop_mark_stack_chunk(stack);
        return TRUE;
    }
    *data = stack->chunk->data[--stack->index];
    return TRUE;
}

static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    push_mark_stack_chunk(stack);
    stack->limit = STACK_CHUNK_SIZE;

    for (i = 0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}


size_t
ruby_stack_length(VALUE **p)
{
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}

#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
static int
stack_check(int water_mark)
{
    int ret;
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
#ifdef __ia64
    if (!ret) {
        ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
              th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
    }
#endif
    return ret;
}
#endif

#define STACKFRAME_FOR_CALL_CFUNC 512

int
ruby_stack_check(void)
{
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
    return 0;
#else
    return stack_check(STACKFRAME_FOR_CALL_CFUNC);
#endif
}

#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)

static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);

static void
gc_mark_stacked_objects(rb_objspace_t *objspace)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj = 0;

    if (!mstack->index) return;
    while (pop_mark_stack(mstack, &obj)) {
        gc_mark_children(objspace, obj);
    }
    shrink_stack_chunk_cache(mstack);
}

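/* Conservative pointer test: returns TRUE only when ptr is a properly
 * aligned address inside one of the object heaps, located by binary
 * search over the address-sorted heap array. */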
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct sorted_heaps_slot *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return FALSE;
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check whether p looks like a pointer into the heap, using binary search */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        heap = &objspace->heap.sorted[mid];
        if (heap->start <= p) {
            if (p < heap->end)
                return TRUE;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return FALSE;
}

static void
mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
{
    VALUE v;
    while (n--) {
        v = *x;
        VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
        if (is_pointer_to_heap(objspace, (void *)v)) {
            gc_mark(objspace, v);
        }
        x++;
    }
}

static void
gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
{
    long n;

    if (end <= start) return;
    n = end - start;
    mark_locations_array(objspace, start, n);
}

void
rb_gc_mark_locations(VALUE *start, VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end);
}

#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))

struct mark_tbl_arg {
    rb_objspace_t *objspace;
};

static int
mark_entry(ID key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, value);
    return ST_CONTINUE;
}

static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    struct mark_tbl_arg arg;
    if (!tbl || tbl->num_entries == 0) return;
    arg.objspace = objspace;
    st_foreach(tbl, mark_entry, (st_data_t)&arg);
}

static int
mark_key(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key);
    return ST_CONTINUE;
}

static void
mark_set(rb_objspace_t *objspace, st_table *tbl)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    st_foreach(tbl, mark_key, (st_data_t)&arg);
}

void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl);
}

static int
mark_keyvalue(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key);
    gc_mark(arg->objspace, value);
    return ST_CONTINUE;
}

static void
mark_hash(rb_objspace_t *objspace, st_table *tbl)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
}

void
rb_mark_hash(st_table *tbl)
{
    mark_hash(&rb_objspace, tbl);
}

static void
mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
{
    const rb_method_definition_t *def = me->def;

    gc_mark(objspace, me->klass);
    if (!def) return;
    switch (def->type) {
      case VM_METHOD_TYPE_ISEQ:
        gc_mark(objspace, def->body.iseq->self);
        break;
      case VM_METHOD_TYPE_BMETHOD:
        gc_mark(objspace, def->body.proc);
        break;
      case VM_METHOD_TYPE_ATTRSET:
      case VM_METHOD_TYPE_IVAR:
        gc_mark(objspace, def->body.attr.location);
        break;
      default:
        break; /* ignore */
    }
}

void
rb_mark_method_entry(const rb_method_entry_t *me)
{
    mark_method_entry(&rb_objspace, me);
}

static int
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    mark_method_entry(arg->objspace, me);
    return ST_CONTINUE;
}

static void
mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
}

static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
{
    if (!me->mark) {
        rb_free_method_entry(me);
    }
    return ST_CONTINUE;
}

void
rb_free_m_table(st_table *tbl)
{
    st_foreach(tbl, free_method_entry_i, 0);
    st_free_table(tbl);
}

static int
mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, ce->value);
    return ST_CONTINUE;
}

static void
mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
}

static int
free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
{
    xfree(ce);
    return ST_CONTINUE;
}

void
rb_free_const_table(st_table *tbl)
{
    st_foreach(tbl, free_const_entry_i, 0);
    st_free_table(tbl);
}

void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl);
}

void
rb_gc_mark_maybe(VALUE obj)
{
    if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
        gc_mark(&rb_objspace, obj);
    }
}

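/* Set the mark bit on ptr (if it is a markable, not-yet-marked object)
 * and push it onto the mark stack; its children are traversed later by
 * gc_mark_stacked_objects(). */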
static void
gc_mark(rb_objspace_t *objspace, VALUE ptr)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

    push_mark_stack(&objspace->mark_stack, ptr);
}

void
rb_gc_mark(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr);
}

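/* Mark the children of ptr.  Tail edges are followed with "goto again"
 * instead of recursion so that long chains (superclass links, node
 * trees, shared strings/arrays) do not consume C stack. */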
static void
gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:      /* 1,2 */
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            /* fall through */
          case NODE_GASGN:      /* 2 */
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        mark_tbl(objspace, RCLASS_IV_TBL(obj));
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3   /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
            if (mark_func) (*mark_func)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv);
01986             gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
01987             gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
01988             gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
01989             gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
01990             gc_mark(objspace, obj->as.file.fptr->write_lock);
01991         }
01992         break;
01993 
01994       case T_REGEXP:
01995         ptr = obj->as.regexp.src;
01996         goto again;
01997 
01998       case T_FLOAT:
01999       case T_BIGNUM:
02000       case T_ZOMBIE:
02001         break;
02002 
02003       case T_MATCH:
02004         gc_mark(objspace, obj->as.match.regexp);
02005         if (obj->as.match.str) {
02006             ptr = obj->as.match.str;
02007             goto again;
02008         }
02009         break;
02010 
02011       case T_RATIONAL:
02012         gc_mark(objspace, obj->as.rational.num);
02013         ptr = obj->as.rational.den;
02014         goto again;
02015 
02016       case T_COMPLEX:
02017         gc_mark(objspace, obj->as.complex.real);
02018         ptr = obj->as.complex.imag;
02019         goto again;
02020 
02021       case T_STRUCT:
02022         {
02023             long len = RSTRUCT_LEN(obj);
02024             VALUE *ptr = RSTRUCT_PTR(obj);
02025 
02026             while (len--) {
02027                 gc_mark(objspace, *ptr++);
02028             }
02029         }
02030         break;
02031 
02032       default:
02033         rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
02034                BUILTIN_TYPE(obj), (void *)obj,
02035                is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
02036     }
02037 }
02038 
02039 static int obj_free(rb_objspace_t *, VALUE);
02040 
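      /* Clear a dead slot's flags and push it onto the global freelist.
       * The VALGRIND annotation marks its memory as undefined again. */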
02041 static inline void
02042 add_freelist(rb_objspace_t *objspace, RVALUE *p)
02043 {
02044     VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
02045     p->as.free.flags = 0;
02046     p->as.free.next = freelist;
02047     freelist = p;
02048 }
02049 
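      /* Run the deferred finalizers chained on 'p'.  Each finalized slot is
       * returned to the freelist, unless a lazy sweep is still in progress
       * (flags are merely cleared) or FL_SINGLETON marks the slot's page as
       * scheduled for release, in which case only that page's live-slot
       * limit is decremented. */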
02050 static void
02051 finalize_list(rb_objspace_t *objspace, RVALUE *p)
02052 {
02053     while (p) {
02054         RVALUE *tmp = p->as.free.next;
02055         run_final(objspace, (VALUE)p);
02056         if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
02057             if (objspace->heap.sweep_slots) {
02058                 p->as.free.flags = 0;
02059             }
02060             else {
02061                 GC_PROF_DEC_LIVE_NUM;
02062                 add_freelist(objspace, p);
02063             }
02064         }
02065         else {
02066             struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
02067             slot->limit--;
02068         }
02069         p = tmp;
02070     }
02071 }
02072 
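      /* Remove 'slot' from the doubly linked list of heap slots, fixing up
       * the list head and the lazy-sweep cursor if they pointed at it. */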
02073 static void
02074 unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
02075 {
02076     if (slot->prev)
02077         slot->prev->next = slot->next;
02078     if (slot->next)
02079         slot->next->prev = slot->prev;
02080     if (heaps == slot)
02081         heaps = slot->next;
02082     if (objspace->heap.sweep_slots == slot)
02083         objspace->heap.sweep_slots = slot->next;
02084     slot->prev = NULL;
02085     slot->next = NULL;
02086 }
02087 
02088 
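      /* Release heap blocks whose slots have all been emptied, compacting
       * the sorted slot array.  The lowest-addressed freed block is kept
       * cached in heaps_freed for possible reuse. */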
02089 static void
02090 free_unused_heaps(rb_objspace_t *objspace)
02091 {
02092     size_t i, j;
02093     RVALUE *last = 0;
02094 
02095     for (i = j = 1; j < heaps_used; i++) {
02096         if (objspace->heap.sorted[i].slot->limit == 0) {
02097             if (!last) {
02098                 last = objspace->heap.sorted[i].slot->membase;
02099             }
02100             else {
02101                 free(objspace->heap.sorted[i].slot->membase);
02102             }
02103             free(objspace->heap.sorted[i].slot);
02104             heaps_used--;
02105         }
02106         else {
02107             if (i != j) {
02108                 objspace->heap.sorted[j] = objspace->heap.sorted[i];
02109             }
02110             j++;
02111         }
02112     }
02113     if (last) {
02114         if (last < heaps_freed) {
02115             free(heaps_freed);
02116             heaps_freed = last;
02117         }
02118         else {
02119             free(last);
02120         }
02121     }
02122 }
02123 
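      /* Sweep one heap slot.  Unmarked objects are freed onto the freelist
       * or, when they need finalization, chained onto deferred_final_list
       * as zombies; surviving objects get FL_MARK cleared.  A slot that
       * becomes entirely empty while enough free objects remain elsewhere
       * is withdrawn from the freelist and unlinked so free_unused_heaps()
       * can release it later. */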
02124 static void
02125 slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
02126 {
02127     size_t free_num = 0, final_num = 0;
02128     RVALUE *p, *pend;
02129     RVALUE *free = freelist, *final = deferred_final_list;
02130     int deferred;
02131 
02132     p = sweep_slot->slot; pend = p + sweep_slot->limit;
02133     while (p < pend) {
02134         if (!(p->as.basic.flags & FL_MARK)) {
02135             if (p->as.basic.flags &&
02136                 ((deferred = obj_free(objspace, (VALUE)p)) ||
02137                  (FL_TEST(p, FL_FINALIZE)))) {
02138                 if (!deferred) {
02139                     p->as.free.flags = T_ZOMBIE;
02140                     RDATA(p)->dfree = 0;
02141                 }
02142                 p->as.free.flags |= FL_MARK;
02143                 p->as.free.next = deferred_final_list;
02144                 deferred_final_list = p;
02145                 final_num++;
02146             }
02147             else {
02148                 add_freelist(objspace, p);
02149                 free_num++;
02150             }
02151         }
02152         else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
02153             /* objects to be finalized */
02154             /* do nothing; it stays marked */
02155         }
02156         else {
02157             RBASIC(p)->flags &= ~FL_MARK;
02158         }
02159         p++;
02160     }
02161     if (final_num + free_num == sweep_slot->limit &&
02162         objspace->heap.free_num > objspace->heap.do_heap_free) {
02163         RVALUE *pp;
02164 
02165         for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
02166             RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
02167             pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
02168         }
02169         sweep_slot->limit = final_num;
02170         freelist = free;        /* cancel this page from freelist */
02171         unlink_heap_slot(objspace, sweep_slot);
02172     }
02173     else {
02174         objspace->heap.free_num += free_num;
02175     }
02176     objspace->heap.final_num += final_num;
02177 
02178     if (deferred_final_list) {
02179         rb_thread_t *th = GET_THREAD();
02180         if (th) {
02181             RUBY_VM_SET_FINALIZER_INTERRUPT(th);
02182         }
02183     }
02184 }
02185 
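      /* GC must not run while disabled or already in progress; in that
       * case grow the heap instead, so that allocation can proceed, and
       * return FALSE. */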
02186 static int
02187 ready_to_gc(rb_objspace_t *objspace)
02188 {
02189     if (dont_gc || during_gc) {
02190         if (!freelist) {
02191             if (!heaps_increment(objspace)) {
02192                 set_heaps_increment(objspace);
02193                 heaps_increment(objspace);
02194             }
02195         }
02196         return FALSE;
02197     }
02198     return TRUE;
02199 }
02200 
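      /* Reset the freelist, position the lazy-sweep cursor at the first
       * heap slot, and compute the sweep thresholds: do_heap_free (the
       * free count above which empty pages are released) and free_min
       * (the minimum number of free slots a sweep must recover before
       * the heap is grown). */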
02201 static void
02202 before_gc_sweep(rb_objspace_t *objspace)
02203 {
02204     freelist = 0;
02205     objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
02206     objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
02207     if (objspace->heap.free_min < initial_free_min) {
02208         objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
02209         objspace->heap.free_min = initial_free_min;
02210     }
02211     objspace->heap.sweep_slots = heaps;
02212     objspace->heap.free_num = 0;
02213 
02214     /* sweep unlinked method entries */
02215     if (GET_VM()->unlinked_method_entry_list) {
02216         rb_sweep_method_entry(GET_VM());
02217     }
02218 }
02219 
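      /* Grow the heap if the sweep recovered too few free slots, raise
       * malloc_limit when malloc traffic exceeded it, and release any
       * heap blocks that became completely empty. */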
02220 static void
02221 after_gc_sweep(rb_objspace_t *objspace)
02222 {
02223     GC_PROF_SET_MALLOC_INFO;
02224 
02225     if (objspace->heap.free_num < objspace->heap.free_min) {
02226         set_heaps_increment(objspace);
02227         heaps_increment(objspace);
02228     }
02229 
02230     if (malloc_increase > malloc_limit) {
02231         malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));
02232         if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
02233     }
02234     malloc_increase = 0;
02235 
02236     free_unused_heaps(objspace);
02237 }
02238 
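      /* Sweep heap slots one at a time, stopping as soon as the freelist
       * becomes non-empty.  Returns TRUE on success, FALSE when every
       * remaining slot was swept without yielding a free object. */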
02239 static int
02240 lazy_sweep(rb_objspace_t *objspace)
02241 {
02242     struct heaps_slot *next;
02243 
02244     heaps_increment(objspace);
02245     while (objspace->heap.sweep_slots) {
02246         next = objspace->heap.sweep_slots->next;
02247         slot_sweep(objspace, objspace->heap.sweep_slots);
02248         objspace->heap.sweep_slots = next;
02249         if (freelist) {
02250             during_gc = 0;
02251             return TRUE;
02252         }
02253     }
02254     return FALSE;
02255 }
02256 
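      /* Finish any sweep work left over from a previous lazy GC cycle. */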
02257 static void
02258 rest_sweep(rb_objspace_t *objspace)
02259 {
02260     if (objspace->heap.sweep_slots) {
02261        while (objspace->heap.sweep_slots) {
02262            lazy_sweep(objspace);
02263        }
02264        after_gc_sweep(objspace);
02265     }
02266 }
02267 
02268 static void gc_marks(rb_objspace_t *objspace);
02269 
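      /* Allocation-driven entry point: try to obtain free slots by lazy
       * sweeping, falling back to a full mark phase and a fresh sweep
       * cycle once the current one is exhausted. */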
02270 static int
02271 gc_lazy_sweep(rb_objspace_t *objspace)
02272 {
02273     int res;
02274     INIT_GC_PROF_PARAMS;
02275 
02276     if (objspace->flags.dont_lazy_sweep)
02277         return garbage_collect(objspace);
02278 
02279 
02280     if (!ready_to_gc(objspace)) return TRUE;
02281 
02282     during_gc++;
02283     GC_PROF_TIMER_START;
02284     GC_PROF_SWEEP_TIMER_START;
02285 
02286     if (objspace->heap.sweep_slots) {
02287         res = lazy_sweep(objspace);
02288         if (res) {
02289             GC_PROF_SWEEP_TIMER_STOP;
02290             GC_PROF_SET_MALLOC_INFO;
02291             GC_PROF_TIMER_STOP(Qfalse);
02292             return res;
02293         }
02294         after_gc_sweep(objspace);
02295     }
02296     else {
02297         if (heaps_increment(objspace)) {
02298             during_gc = 0;
02299             return TRUE;
02300         }
02301     }
02302 
02303     gc_marks(objspace);
02304 
02305     before_gc_sweep(objspace);
02306     if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) {
02307         set_heaps_increment(objspace);
02308     }
02309 
02310     GC_PROF_SWEEP_TIMER_START;
02311     if (!(res = lazy_sweep(objspace))) {
02312         after_gc_sweep(objspace);
02313         if (freelist) {
02314             res = TRUE;
02315             during_gc = 0;
02316         }
02317     }
02318     GC_PROF_SWEEP_TIMER_STOP;
02319 
02320     GC_PROF_TIMER_STOP(Qtrue);
02321     return res;
02322 }
02323 
02324 static void
02325 gc_sweep(rb_objspace_t *objspace)
02326 {
02327     struct heaps_slot *next;
02328 
02329     before_gc_sweep(objspace);
02330 
02331     while (objspace->heap.sweep_slots) {
02332         next = objspace->heap.sweep_slots->next;
02333         slot_sweep(objspace, objspace->heap.sweep_slots);
02334         objspace->heap.sweep_slots = next;
02335     }
02336 
02337     after_gc_sweep(objspace);
02338 
02339     during_gc = 0;
02340 }
02341 
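      /* Immediately recycle an object slot without waiting for GC.  A
       * marked slot is merely cleared; an unmarked one goes straight back
       * onto the freelist. */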
02342 void
02343 rb_gc_force_recycle(VALUE p)
02344 {
02345     rb_objspace_t *objspace = &rb_objspace;
02346     GC_PROF_DEC_LIVE_NUM;
02347     if (RBASIC(p)->flags & FL_MARK) {
02348         RANY(p)->as.free.flags = 0;
02349     }
02350     else {
02351         add_freelist(objspace, (RVALUE *)p);
02352     }
02353 }
02354 
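      /* Turn a slot into a T_ZOMBIE whose actual release is deferred to
       * the finalization phase; make_io_deferred() additionally arranges
       * for rb_io_fptr_finalize() to close the underlying rb_io_t. */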
02355 static inline void
02356 make_deferred(RVALUE *p)
02357 {
02358     p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
02359 }
02360 
02361 static inline void
02362 make_io_deferred(RVALUE *p)
02363 {
02364     rb_io_t *fptr = p->as.file.fptr;
02365     make_deferred(p);
02366     p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
02367     p->as.data.data = fptr;
02368 }
02369 
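      /* Release the resources owned by 'obj'.  Returns 1 when the release
       * itself must be deferred (T_DATA with a dfree function, T_FILE),
       * 0 otherwise. */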
02370 static int
02371 obj_free(rb_objspace_t *objspace, VALUE obj)
02372 {
02373     switch (BUILTIN_TYPE(obj)) {
02374       case T_NIL:
02375       case T_FIXNUM:
02376       case T_TRUE:
02377       case T_FALSE:
02378         rb_bug("obj_free() called for broken object");
02379         break;
02380     }
02381 
02382     if (FL_TEST(obj, FL_EXIVAR)) {
02383         rb_free_generic_ivar((VALUE)obj);
02384         FL_UNSET(obj, FL_EXIVAR);
02385     }
02386 
02387     switch (BUILTIN_TYPE(obj)) {
02388       case T_OBJECT:
02389         if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
02390             RANY(obj)->as.object.as.heap.ivptr) {
02391             xfree(RANY(obj)->as.object.as.heap.ivptr);
02392         }
02393         break;
02394       case T_MODULE:
02395       case T_CLASS:
02396         rb_clear_cache_by_class((VALUE)obj);
02397         rb_free_m_table(RCLASS_M_TBL(obj));
02398         if (RCLASS_IV_TBL(obj)) {
02399             st_free_table(RCLASS_IV_TBL(obj));
02400         }
02401         if (RCLASS_CONST_TBL(obj)) {
02402             rb_free_const_table(RCLASS_CONST_TBL(obj));
02403         }
02404         if (RCLASS_IV_INDEX_TBL(obj)) {
02405             st_free_table(RCLASS_IV_INDEX_TBL(obj));
02406         }
02407         xfree(RANY(obj)->as.klass.ptr);
02408         break;
02409       case T_STRING:
02410         rb_str_free(obj);
02411         break;
02412       case T_ARRAY:
02413         rb_ary_free(obj);
02414         break;
02415       case T_HASH:
02416         if (RANY(obj)->as.hash.ntbl) {
02417             st_free_table(RANY(obj)->as.hash.ntbl);
02418         }
02419         break;
02420       case T_REGEXP:
02421         if (RANY(obj)->as.regexp.ptr) {
02422             onig_free(RANY(obj)->as.regexp.ptr);
02423         }
02424         break;
02425       case T_DATA:
02426         if (DATA_PTR(obj)) {
02427             if (RTYPEDDATA_P(obj)) {
02428                 RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
02429             }
02430             if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
02431                 xfree(DATA_PTR(obj));
02432             }
02433             else if (RANY(obj)->as.data.dfree) {
02434                 make_deferred(RANY(obj));
02435                 return 1;
02436             }
02437         }
02438         break;
02439       case T_MATCH:
02440         if (RANY(obj)->as.match.rmatch) {
02441             struct rmatch *rm = RANY(obj)->as.match.rmatch;
02442             onig_region_free(&rm->regs, 0);
02443             if (rm->char_offset)
02444                 xfree(rm->char_offset);
02445             xfree(rm);
02446         }
02447         break;
02448       case T_FILE:
02449         if (RANY(obj)->as.file.fptr) {
02450             make_io_deferred(RANY(obj));
02451             return 1;
02452         }
02453         break;
02454       case T_RATIONAL:
02455       case T_COMPLEX:
02456         break;
02457       case T_ICLASS:
02458         /* iClass shares table with the module */
02459         xfree(RANY(obj)->as.klass.ptr);
02460         break;
02461 
02462       case T_FLOAT:
02463         break;
02464 
02465       case T_BIGNUM:
02466         if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
02467             xfree(RBIGNUM_DIGITS(obj));
02468         }
02469         break;
02470       case T_NODE:
02471         switch (nd_type(obj)) {
02472           case NODE_SCOPE:
02473             if (RANY(obj)->as.node.u1.tbl) {
02474                 xfree(RANY(obj)->as.node.u1.tbl);
02475             }
02476             break;
02477           case NODE_ALLOCA:
02478             xfree(RANY(obj)->as.node.u1.node);
02479             break;
02480         }
02481         break;                  /* no need to free iv_tbl */
02482 
02483       case T_STRUCT:
02484         if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
02485             RANY(obj)->as.rstruct.as.heap.ptr) {
02486             xfree(RANY(obj)->as.rstruct.as.heap.ptr);
02487         }
02488         break;
02489 
02490       default:
02491         rb_bug("obj_free(): unknown data type 0x%x(%p)",
02492                BUILTIN_TYPE(obj), (void*)obj);
02493     }
02494 
02495     return 0;
02496 }
02497 
02498 #define GC_NOTIFY 0
02499 
02500 #if STACK_GROW_DIRECTION < 0
02501 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
02502 #elif STACK_GROW_DIRECTION > 0
02503 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
02504 #else
02505 #define GET_STACK_BOUNDS(start, end, appendix) \
02506     ((STACK_END < STACK_START) ? \
02507      ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
02508 #endif
02509 
02510 #define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
02511 
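      /* Conservatively mark the current machine context: the registers are
       * spilled into a jmp_buf by rb_setjmp() and scanned along with the
       * machine stack. */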
02512 static void
02513 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
02514 {
02515     union {
02516         rb_jmp_buf j;
02517         VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
02518     } save_regs_gc_mark;
02519     VALUE *stack_start, *stack_end;
02520 
02521     FLUSH_REGISTER_WINDOWS;
02522     /* This assumes that all registers are saved into the jmp_buf (and stack) */
02523     rb_setjmp(save_regs_gc_mark.j);
02524 
02525     SET_STACK_END;
02526     GET_STACK_BOUNDS(stack_start, stack_end, 1);
02527 
02528     mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
02529 
02530     rb_gc_mark_locations(stack_start, stack_end);
02531 #ifdef __ia64
02532     rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
02533 #endif
02534 #if defined(__mc68000__)
02535     mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
02536                          (STACK_START - STACK_END));
02537 #endif
02538 }
02539 
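      /* Mark phase: mark the VM roots, the machine context, symbols,
       * encodings, protected globals, the class table and the other root
       * sets, then drain the mark stack. */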
02540 static void
02541 gc_marks(rb_objspace_t *objspace)
02542 {
02543     struct gc_list *list;
02544     rb_thread_t *th = GET_THREAD();
02545     GC_PROF_MARK_TIMER_START;
02546 
02547     objspace->heap.live_num = 0;
02548     objspace->count++;
02549 
02550 
02551     SET_STACK_END;
02552 
02553     th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
02554 
02555     mark_tbl(objspace, finalizer_table);
02556     mark_current_machine_context(objspace, th);
02557 
02558     rb_gc_mark_symbols();
02559     rb_gc_mark_encodings();
02560 
02561     /* mark protected global variables */
02562     for (list = global_List; list; list = list->next) {
02563         rb_gc_mark_maybe(*list->varptr);
02564     }
02565     rb_mark_end_proc();
02566     rb_gc_mark_global_tbl();
02567 
02568     mark_tbl(objspace, rb_class_tbl);
02569 
02570     /* mark generic instance variables for special constants */
02571     rb_mark_generic_ivar_tbl();
02572 
02573     rb_gc_mark_parser();
02574 
02575     rb_gc_mark_unlinked_live_method_entries(th->vm);
02576 
02577     /* marking-loop */
02578     gc_mark_stacked_objects(objspace);
02579 
02580     GC_PROF_MARK_TIMER_STOP;
02581 }
02582 
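      /* Full collection: finish any pending lazy sweep, run the mark
       * phase, then sweep the whole heap eagerly. */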
02583 static int
02584 garbage_collect(rb_objspace_t *objspace)
02585 {
02586     INIT_GC_PROF_PARAMS;
02587 
02588     if (GC_NOTIFY) printf("start garbage_collect()\n");
02589 
02590     if (!heaps) {
02591         return FALSE;
02592     }
02593     if (!ready_to_gc(objspace)) {
02594         return TRUE;
02595     }
02596 
02597     GC_PROF_TIMER_START;
02598 
02599     rest_sweep(objspace);
02600 
02601     during_gc++;
02602     gc_marks(objspace);
02603 
02604     GC_PROF_SWEEP_TIMER_START;
02605     gc_sweep(objspace);
02606     GC_PROF_SWEEP_TIMER_STOP;
02607 
02608     GC_PROF_TIMER_STOP(Qtrue);
02609     if (GC_NOTIFY) printf("end garbage_collect()\n");
02610     return TRUE;
02611 }
02612 
02613 int
02614 rb_garbage_collect(void)
02615 {
02616     return garbage_collect(&rb_objspace);
02617 }
02618 
02619 void
02620 rb_gc_mark_machine_stack(rb_thread_t *th)
02621 {
02622     rb_objspace_t *objspace = &rb_objspace;
02623     VALUE *stack_start, *stack_end;
02624 
02625     GET_STACK_BOUNDS(stack_start, stack_end, 0);
02626     rb_gc_mark_locations(stack_start, stack_end);
02627 #ifdef __ia64
02628     rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
02629 #endif
02630 }
02631 
02632 
02633 /*
02634  *  call-seq:
02635  *     GC.start                     -> nil
02636  *     gc.garbage_collect           -> nil
02637  *     ObjectSpace.garbage_collect  -> nil
02638  *
02639  *  Initiates garbage collection, unless manually disabled.
02640  *
02641  */
02642 
02643 VALUE
02644 rb_gc_start(void)
02645 {
02646     rb_gc();
02647     return Qnil;
02648 }
02649 
02650 #undef Init_stack
02651 
02652 void
02653 Init_stack(volatile VALUE *addr)
02654 {
02655     ruby_init_stack(addr);
02656 }
02657 
02658 /*
02659  * Document-class: ObjectSpace
02660  *
02661  *  The <code>ObjectSpace</code> module contains a number of routines
02662  *  that interact with the garbage collection facility and allow you to
02663  *  traverse all living objects with an iterator.
02664  *
02665  *  <code>ObjectSpace</code> also provides support for object
02666  *  finalizers, procs that will be called when a specific object is
02667  *  about to be destroyed by garbage collection.
02668  *
02669  *     include ObjectSpace
02670  *
02671  *
02672  *     a = "A"
02673  *     b = "B"
02674  *     c = "C"
02675  *
02676  *
02677  *     define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
02678  *     define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
02679  *     define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
02680  *
02681  *  <em>produces:</em>
02682  *
02683  *     Finalizer three on 537763470
02684  *     Finalizer one on 537763480
02685  *     Finalizer two on 537763480
02686  *
02687  */
02688 
02689 void
02690 Init_heap(void)
02691 {
02692     init_heap(&rb_objspace);
02693 }
02694 
02695 static VALUE
02696 lazy_sweep_enable(void)
02697 {
02698     rb_objspace_t *objspace = &rb_objspace;
02699 
02700     objspace->flags.dont_lazy_sweep = FALSE;
02701     return Qnil;
02702 }
02703 
02704 typedef int each_obj_callback(void *, void *, size_t, void *);
02705 
02706 struct each_obj_args {
02707     each_obj_callback *callback;
02708     void *data;
02709 };
02710 
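      /* Walk the heap slots in membase address order.  The index into the
       * sorted array is re-derived from 'membase' on every iteration
       * because the callback may allocate and thereby grow or reorder the
       * array; 'v' holds a live reference so the current slot cannot be
       * reclaimed mid-iteration. */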
02711 static VALUE
02712 objspace_each_objects(VALUE arg)
02713 {
02714     size_t i;
02715     RVALUE *membase = 0;
02716     RVALUE *pstart, *pend;
02717     rb_objspace_t *objspace = &rb_objspace;
02718     struct each_obj_args *args = (struct each_obj_args *)arg;
02719     volatile VALUE v;
02720 
02721     i = 0;
02722     while (i < heaps_used) {
02723         while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
02724             i--;
02725         while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
02726             i++;
02727         if (heaps_used <= i)
02728           break;
02729         membase = objspace->heap.sorted[i].slot->membase;
02730 
02731         pstart = objspace->heap.sorted[i].slot->slot;
02732         pend = pstart + objspace->heap.sorted[i].slot->limit;
02733 
02734         for (; pstart != pend; pstart++) {
02735             if (pstart->as.basic.flags) {
02736                 v = (VALUE)pstart; /* acquire to save this object */
02737                 break;
02738             }
02739         }
02740         if (pstart != pend) {
02741             if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
02742                 break;
02743             }
02744         }
02745     }
02746 
02747     return Qnil;
02748 }
02749 
02750 /*
02751  * rb_objspace_each_objects() is a special C API for walking through
02752  * the Ruby object space.  It is difficult to use correctly; frankly,
02753  * you should not use it unless you have read the source code of this
02754  * function and understand exactly what it does.
02755  *
02756  * 'callback' will be called several times (once per heap slot in the
02757  * current implementation) with:
02758  *   vstart: a pointer to the first living object in the heap slot.
02759  *   vend: a pointer to one past the end of the valid heap slot area.
02760  *   stride: the distance to the next VALUE.
02761  *
02762  * If callback() returns non-zero, the iteration is stopped.
02763  *
02764  * This is a sample callback that iterates over live objects:
02765  *
02766  *   int
02767  *   sample_callback(void *vstart, void *vend, size_t stride, void *data) {
02768  *     VALUE v = (VALUE)vstart;
02769  *     for (; v != (VALUE)vend; v += stride) {
02770  *       if (RBASIC(v)->flags) { // liveness check
02771  *         // do something with live object 'v'
02772  *       }
02773  *     }
02774  *     return 0; // continue the iteration
02775  *   }
02776  *
02777  * Note: 'vstart' is not the top of the heap slot.  It points at the
02778  *       first living object so that at least one object stays pinned,
02779  *       avoiding GC issues.  This means you cannot walk through every
02780  *       object slot, including freed slots.
02781  *
02782  * Note: In this implementation, 'stride' equals sizeof(RVALUE), but
02783  *       variable strides may be passed in the future, so use 'stride'
02784  *       rather than a hard-coded constant in the iteration.
02785  */
02786 void
02787 rb_objspace_each_objects(each_obj_callback *callback, void *data)
02788 {
02789     struct each_obj_args args;
02790     rb_objspace_t *objspace = &rb_objspace;
02791 
02792     rest_sweep(objspace);
02793     objspace->flags.dont_lazy_sweep = TRUE;
02794 
02795     args.callback = callback;
02796     args.data = data;
02797     rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
02798 }
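
      /* Usage sketch (illustrative only, not part of this file's API):
       * counting live objects from a C extension with the API above.
       *
       *   static int
       *   count_live_cb(void *vstart, void *vend, size_t stride, void *data)
       *   {
       *       size_t *n = (size_t *)data;
       *       VALUE v = (VALUE)vstart;
       *       for (; v != (VALUE)vend; v += stride) {
       *           if (RBASIC(v)->flags) (*n)++; // liveness check
       *       }
       *       return 0; // returning 0 continues the iteration
       *   }
       *
       *   size_t n = 0;
       *   rb_objspace_each_objects(count_live_cb, &n);
       */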
02799 
02800 struct os_each_struct {
02801     size_t num;
02802     VALUE of;
02803 };
02804 
02805 static int
02806 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
02807 {
02808     struct os_each_struct *oes = (struct os_each_struct *)data;
02809     RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
02810     volatile VALUE v;
02811 
02812     for (; p != pend; p++) {
02813         if (p->as.basic.flags) {
02814             switch (BUILTIN_TYPE(p)) {
02815               case T_NONE:
02816               case T_ICLASS:
02817               case T_NODE:
02818               case T_ZOMBIE:
02819                 continue;
02820               case T_CLASS:
02821                 if (FL_TEST(p, FL_SINGLETON))
02822                   continue;
02823               default:
02824                 if (!p->as.basic.klass) continue;
02825                 v = (VALUE)p;
02826                 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
02827                     rb_yield(v);
02828                     oes->num++;
02829                 }
02830             }
02831         }
02832     }
02833 
02834     return 0;
02835 }
02836 
02837 static VALUE
02838 os_obj_of(VALUE of)
02839 {
02840     struct os_each_struct oes;
02841 
02842     oes.num = 0;
02843     oes.of = of;
02844     rb_objspace_each_objects(os_obj_of_i, &oes);
02845     return SIZET2NUM(oes.num);
02846 }
02847 
02848 /*
02849  *  call-seq:
02850  *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
02851  *     ObjectSpace.each_object([module])              -> an_enumerator
02852  *
02853  *  Calls the block once for each living, nonimmediate object in this
02854  *  Ruby process. If <i>module</i> is specified, calls the block
02855  *  for only those classes or modules that match (or are a subclass of)
02856  *  <i>module</i>. Returns the number of objects found. Immediate
02857  *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
02858  *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
02859  *  never returned. In the example below, <code>each_object</code>
02860  *  returns both the numbers we defined and several constants defined in
02861  *  the <code>Math</code> module.
02862  *
02863  *  If no block is given, an enumerator is returned instead.
02864  *
02865  *     a = 102.7
02866  *     b = 95       # Won't be returned
02867  *     c = 12345678987654321
02868  *     count = ObjectSpace.each_object(Numeric) {|x| p x }
02869  *     puts "Total count: #{count}"
02870  *
02871  *  <em>produces:</em>
02872  *
02873  *     12345678987654321
02874  *     102.7
02875  *     2.71828182845905
02876  *     3.14159265358979
02877  *     2.22044604925031e-16
02878  *     1.7976931348623157e+308
02879  *     2.2250738585072e-308
02880  *     Total count: 7
02881  *
02882  */
02883 
02884 static VALUE
02885 os_each_obj(int argc, VALUE *argv, VALUE os)
02886 {
02887     VALUE of;
02888 
02889     rb_secure(4);
02890     if (argc == 0) {
02891         of = 0;
02892     }
02893     else {
02894         rb_scan_args(argc, argv, "01", &of);
02895     }
02896     RETURN_ENUMERATOR(os, 1, &of);
02897     return os_obj_of(of);
02898 }
02899 
02900 /*
02901  *  call-seq:
02902  *     ObjectSpace.undefine_finalizer(obj)
02903  *
02904  *  Removes all finalizers for <i>obj</i>.
02905  *
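       *  For example (illustrative):
       *
       *     obj = Object.new
       *     ObjectSpace.define_finalizer(obj, proc { puts "finalized" })
       *     ObjectSpace.undefine_finalizer(obj)
       *     # the finalizer registered above will no longer run
       *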
02906  */
02907 
02908 static VALUE
02909 undefine_final(VALUE os, VALUE obj)
02910 {
02911     rb_objspace_t *objspace = &rb_objspace;
02912     st_data_t data = obj;
02913     rb_check_frozen(obj);
02914     st_delete(finalizer_table, &data, 0);
02915     FL_UNSET(obj, FL_FINALIZE);
02916     return obj;
02917 }
02918 
02919 /*
02920  *  call-seq:
02921  *     ObjectSpace.define_finalizer(obj, aProc=proc())
02922  *
02923  *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
02924  *  has been destroyed.
02925  *
02926  */
02927 
02928 static VALUE
02929 define_final(int argc, VALUE *argv, VALUE os)
02930 {
02931     rb_objspace_t *objspace = &rb_objspace;
02932     VALUE obj, block, table;
02933     st_data_t data;
02934 
02935     rb_scan_args(argc, argv, "11", &obj, &block);
02936     rb_check_frozen(obj);
02937     if (argc == 1) {
02938         block = rb_block_proc();
02939     }
02940     else if (!rb_respond_to(block, rb_intern("call"))) {
02941         rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
02942                  rb_obj_classname(block));
02943     }
02944     if (!FL_ABLE(obj)) {
02945         rb_raise(rb_eArgError, "cannot define finalizer for %s",
02946                  rb_obj_classname(obj));
02947     }
02948     RBASIC(obj)->flags |= FL_FINALIZE;
02949 
02950     block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
02951     OBJ_FREEZE(block);
02952 
02953     if (st_lookup(finalizer_table, obj, &data)) {
02954         table = (VALUE)data;
02955         rb_ary_push(table, block);
02956     }
02957     else {
02958         table = rb_ary_new3(1, block);
02959         RBASIC(table)->klass = 0;
02960         st_add_direct(finalizer_table, obj, table);
02961     }
02962     return block;
02963 }
02964 
02965 void
02966 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
02967 {
02968     rb_objspace_t *objspace = &rb_objspace;
02969     VALUE table;
02970     st_data_t data;
02971 
02972     if (!FL_TEST(obj, FL_FINALIZE)) return;
02973     if (st_lookup(finalizer_table, obj, &data)) {
02974         table = (VALUE)data;
02975         st_insert(finalizer_table, dest, table);
02976     }
02977     FL_SET(dest, FL_FINALIZE);
02978 }
02979 
02980 static VALUE
02981 run_single_final(VALUE arg)
02982 {
02983     VALUE *args = (VALUE *)arg;
02984     rb_eval_cmd(args[0], args[1], (int)args[2]);
02985     return Qnil;
02986 }
02987 
02988 static void
02989 run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
02990 {
02991     long i;
02992     int status;
02993     VALUE args[3];
02994 
02995     if (RARRAY_LEN(table) > 0) {
02996         args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
02997     }
02998     else {
02999         args[1] = 0;
03000     }
03001 
03002     args[2] = (VALUE)rb_safe_level();
03003     for (i=0; i<RARRAY_LEN(table); i++) {
03004         VALUE final = RARRAY_PTR(table)[i];
03005         args[0] = RARRAY_PTR(final)[1];
03006         args[2] = FIX2INT(RARRAY_PTR(final)[0]);
03007         status = 0;
03008         rb_protect(run_single_final, (VALUE)args, &status);
03009         if (status)
03010             rb_set_errinfo(Qnil);
03011     }
03012 }
03013 
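      /* Finalize a single zombie: run its deferred dfree function, then
       * any user-defined finalizer procs registered in finalizer_table. */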
03014 static void
03015 run_final(rb_objspace_t *objspace, VALUE obj)
03016 {
03017     VALUE objid;
03018     RUBY_DATA_FUNC free_func = 0;
03019     st_data_t key, table;
03020 
03021     objspace->heap.final_num--;
03022 
03023     objid = rb_obj_id(obj);     /* make obj into id */
03024     RBASIC(obj)->klass = 0;
03025 
03026     if (RTYPEDDATA_P(obj)) {
03027         free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
03028     }
03029     else {
03030         free_func = RDATA(obj)->dfree;
03031     }
03032     if (free_func) {
03033         (*free_func)(DATA_PTR(obj));
03034     }
03035 
03036     key = (st_data_t)obj;
03037     if (st_delete(finalizer_table, &key, &table)) {
03038         run_finalizer(objspace, objid, (VALUE)table);
03039     }
03040 }
03041 
03042 static void
03043 finalize_deferred(rb_objspace_t *objspace)
03044 {
03045     RVALUE *p = deferred_final_list;
03046     deferred_final_list = 0;
03047 
03048     if (p) {
03049         finalize_list(objspace, p);
03050     }
03051 }
03052 
03053 void
03054 rb_gc_finalize_deferred(void)
03055 {
03056     finalize_deferred(&rb_objspace);
03057 }
03058 
03059 struct force_finalize_list {
03060     VALUE obj;
03061     VALUE table;
03062     struct force_finalize_list *next;
03063 };
03064 
03065 static int
03066 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
03067 {
03068     struct force_finalize_list **prev = (struct force_finalize_list **)arg;
03069     struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
03070     curr->obj = key;
03071     curr->table = val;
03072     curr->next = *prev;
03073     *prev = curr;
03074     return ST_CONTINUE;
03075 }
03076 
03077 void
03078 rb_gc_call_finalizer_at_exit(void)
03079 {
03080     rb_objspace_call_finalizer(&rb_objspace);
03081 }
03082 
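      /* At-exit finalization: drain the deferred list, force every
       * registered finalizer to run, then invoke dfree for the remaining
       * T_DATA and T_FILE objects (threads, mutexes and fibers excluded). */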
03083 static void
03084 rb_objspace_call_finalizer(rb_objspace_t *objspace)
03085 {
03086     RVALUE *p, *pend;
03087     RVALUE *final_list = 0;
03088     size_t i;
03089 
03090     rest_sweep(objspace);
03091 
03092     /* run finalizers */
03093     finalize_deferred(objspace);
03094     assert(deferred_final_list == 0);
03095 
03096     /* forcibly run all remaining finalizers */
03097     while (finalizer_table->num_entries) {
03098         struct force_finalize_list *list = 0;
03099         st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
03100         while (list) {
03101             struct force_finalize_list *curr = list;
03102             run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
03103             st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
03104             list = curr->next;
03105             xfree(curr);
03106         }
03107     }
03108 
03109     /* finalizers are part of garbage collection */
03110     during_gc++;
03111 
03112     /* run data objects' finalizers */
03113     for (i = 0; i < heaps_used; i++) {
03114         p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
03115         while (p < pend) {
03116             if (BUILTIN_TYPE(p) == T_DATA &&
03117                 DATA_PTR(p) && RANY(p)->as.data.dfree &&
03118                 !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
03119                 !rb_obj_is_fiber((VALUE)p)) {
03120                 p->as.free.flags = 0;
03121                 if (RTYPEDDATA_P(p)) {
03122                     RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
03123                 }
03124                 if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
03125                     xfree(DATA_PTR(p));
03126                 }
03127                 else if (RANY(p)->as.data.dfree) {
03128                     make_deferred(RANY(p));
03129                     RANY(p)->as.free.next = final_list;
03130                     final_list = p;
03131                 }
03132             }
03133             else if (BUILTIN_TYPE(p) == T_FILE) {
03134                 if (RANY(p)->as.file.fptr) {
03135                     make_io_deferred(RANY(p));
03136                     RANY(p)->as.free.next = final_list;
03137                     final_list = p;
03138                 }
03139             }
03140             p++;
03141         }
03142     }
03143     during_gc = 0;
03144     if (final_list) {
03145         finalize_list(objspace, final_list);
03146     }
03147 
03148     st_free_table(finalizer_table);
03149     finalizer_table = 0;
03150 }
03151 
03152 void
03153 rb_gc(void)
03154 {
03155     rb_objspace_t *objspace = &rb_objspace;
03156     garbage_collect(objspace);
03157     finalize_deferred(objspace);
03158     free_unused_heaps(objspace);
03159 }
03160 
03161 /*
03162  *  call-seq:
03163  *     ObjectSpace._id2ref(object_id) -> an_object
03164  *
03165  *  Converts an object id to a reference to the object. May not be
03166  *  called on an object id passed as a parameter to a finalizer.
03167  *
03168  *     s = "I am a string"                    #=> "I am a string"
03169  *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
03170  *     r == s                                 #=> true
03171  *
03172  */
03173 
03174 static VALUE
03175 id2ref(VALUE obj, VALUE objid)
03176 {
03177 #if SIZEOF_LONG == SIZEOF_VOIDP
03178 #define NUM2PTR(x) NUM2ULONG(x)
03179 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
03180 #define NUM2PTR(x) NUM2ULL(x)
03181 #endif
03182     rb_objspace_t *objspace = &rb_objspace;
03183     VALUE ptr;
03184     void *p0;
03185 
03186     rb_secure(4);
03187     ptr = NUM2PTR(objid);
03188     p0 = (void *)ptr;
03189 
03190     if (ptr == Qtrue) return Qtrue;
03191     if (ptr == Qfalse) return Qfalse;
03192     if (ptr == Qnil) return Qnil;
03193     if (FIXNUM_P(ptr)) return (VALUE)ptr;
03194     ptr = obj_id_to_ref(objid);
03195 
03196     if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
03197         ID symid = ptr / sizeof(RVALUE);
03198         if (rb_id2name(symid) == 0)
03199             rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
03200         return ID2SYM(symid);
03201     }
03202 
03203     if (!is_pointer_to_heap(objspace, (void *)ptr) ||
03204         BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
03205         rb_raise(rb_eRangeError, "%p is not id value", p0);
03206     }
03207     if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
03208         rb_raise(rb_eRangeError, "%p is recycled object", p0);
03209     }
03210     return (VALUE)ptr;
03211 }
03212 
03213 /*
03214  *  Document-method: __id__
03215  *  Document-method: object_id
03216  *
03217  *  call-seq:
03218  *     obj.__id__       -> fixnum
03219  *     obj.object_id    -> fixnum
03220  *
03221  *  Returns an integer identifier for <i>obj</i>. The same number will
03222  *  be returned on all calls to <code>id</code> for a given object, and
03223  *  no two active objects will share an id.
03224  *  <code>Object#object_id</code> is a different concept from the
03225  *  <code>:name</code> notation, which returns the symbol id of
03226  *  <code>name</code>. Replaces the deprecated <code>Object#id</code>.
03227  */
03228 
03229 /*
03230  *  call-seq:
03231  *     obj.hash    -> fixnum
03232  *
03233  *  Generates a <code>Fixnum</code> hash value for this object. This
03234  *  function must have the property that <code>a.eql?(b)</code> implies
03235  *  <code>a.hash == b.hash</code>. The hash value is used by class
03236  *  <code>Hash</code>. Any hash value that exceeds the capacity of a
03237  *  <code>Fixnum</code> will be truncated before being used.
03238  */
03239 
03240 VALUE
03241 rb_obj_id(VALUE obj)
03242 {
03243     /*
03244      *                32-bit VALUE space
03245      *          MSB ------------------------ LSB
03246      *  false   00000000000000000000000000000000
03247      *  true    00000000000000000000000000000010
03248      *  nil     00000000000000000000000000000100
03249      *  undef   00000000000000000000000000000110
03250      *  symbol  ssssssssssssssssssssssss00001110
03251      *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
03252      *  fixnum  fffffffffffffffffffffffffffffff1
03253      *
03254      *                    object_id space
03255      *                                       LSB
03256      *  false   00000000000000000000000000000000
03257      *  true    00000000000000000000000000000010
03258      *  nil     00000000000000000000000000000100
03259      *  undef   00000000000000000000000000000110
03260      *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
03261      *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
03262      *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
03263      *
03264      *  where A = sizeof(RVALUE)/4
03265      *
03266      *  sizeof(RVALUE) is
03267      *  20 if 32-bit, double is 4-byte aligned
03268      *  24 if 32-bit, double is 8-byte aligned
03269      *  40 if 64-bit
03270      */
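          /* Worked example (illustrative): on a 64-bit build A = 10, so
           * the symbol with ID 3 gets object_id (3 * 40 + 16) | 1 == 137,
           * and id2ref() can recover it because 136 % 40 == 16 and
           * 136 / 40 == 3. */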
03271     if (SYMBOL_P(obj)) {
03272         return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
03273     }
03274     if (SPECIAL_CONST_P(obj)) {
03275         return LONG2NUM((SIGNED_VALUE)obj);
03276     }
03277     return nonspecial_obj_id(obj);
03278 }
03279 
03280 static int
03281 set_zero(st_data_t key, st_data_t val, st_data_t arg)
03282 {
03283     VALUE k = (VALUE)key;
03284     VALUE hash = (VALUE)arg;
03285     rb_hash_aset(hash, k, INT2FIX(0));
03286     return ST_CONTINUE;
03287 }
03288 
03289 /*
03290  *  call-seq:
03291  *     ObjectSpace.count_objects([result_hash]) -> hash
03292  *
03293  *  Counts objects for each type.
03294  *
03295  *  It returns a hash such as:
03296  *  {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
03297  *
03298  *  If the optional argument result_hash is given,
03299  *  it is overwritten and returned.
03300  *  This is intended to avoid the probe effect.
03301  *
03302  *  The contents of the returned hash are implementation defined
03303  *  and may change in the future.
03304  *
03305  *  This method is only expected to work on C Ruby.
03306  *
03307  */
03308 
03309 static VALUE
03310 count_objects(int argc, VALUE *argv, VALUE os)
03311 {
03312     rb_objspace_t *objspace = &rb_objspace;
03313     size_t counts[T_MASK+1];
03314     size_t freed = 0;
03315     size_t total = 0;
03316     size_t i;
03317     VALUE hash;
03318 
03319     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03320         if (TYPE(hash) != T_HASH)
03321             rb_raise(rb_eTypeError, "non-hash given");
03322     }
03323 
03324     for (i = 0; i <= T_MASK; i++) {
03325         counts[i] = 0;
03326     }
03327 
03328     for (i = 0; i < heaps_used; i++) {
03329         RVALUE *p, *pend;
03330 
03331         p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
03332         for (;p < pend; p++) {
03333             if (p->as.basic.flags) {
03334                 counts[BUILTIN_TYPE(p)]++;
03335             }
03336             else {
03337                 freed++;
03338             }
03339         }
03340         total += objspace->heap.sorted[i].slot->limit;
03341     }
03342 
03343     if (hash == Qnil) {
03344         hash = rb_hash_new();
03345     }
03346     else if (!RHASH_EMPTY_P(hash)) {
03347         st_foreach(RHASH_TBL(hash), set_zero, hash);
03348     }
03349     rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
03350     rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
03351 
03352     for (i = 0; i <= T_MASK; i++) {
03353         VALUE type;
03354         switch (i) {
03355 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
03356             COUNT_TYPE(T_NONE);
03357             COUNT_TYPE(T_OBJECT);
03358             COUNT_TYPE(T_CLASS);
03359             COUNT_TYPE(T_MODULE);
03360             COUNT_TYPE(T_FLOAT);
03361             COUNT_TYPE(T_STRING);
03362             COUNT_TYPE(T_REGEXP);
03363             COUNT_TYPE(T_ARRAY);
03364             COUNT_TYPE(T_HASH);
03365             COUNT_TYPE(T_STRUCT);
03366             COUNT_TYPE(T_BIGNUM);
03367             COUNT_TYPE(T_FILE);
03368             COUNT_TYPE(T_DATA);
03369             COUNT_TYPE(T_MATCH);
03370             COUNT_TYPE(T_COMPLEX);
03371             COUNT_TYPE(T_RATIONAL);
03372             COUNT_TYPE(T_NIL);
03373             COUNT_TYPE(T_TRUE);
03374             COUNT_TYPE(T_FALSE);
03375             COUNT_TYPE(T_SYMBOL);
03376             COUNT_TYPE(T_FIXNUM);
03377             COUNT_TYPE(T_UNDEF);
03378             COUNT_TYPE(T_NODE);
03379             COUNT_TYPE(T_ICLASS);
03380             COUNT_TYPE(T_ZOMBIE);
03381 #undef COUNT_TYPE
03382           default:              type = INT2NUM(i); break;
03383         }
03384         if (counts[i])
03385             rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
03386     }
03387 
03388     return hash;
03389 }
03390 
03391 /*
03392  *  call-seq:
03393  *     GC.count -> Integer
03394  *
03395  *  The number of times GC has occurred.
03396  *
03397  *  It returns the number of times GC has occurred since the process started.
03398  *
03399  */
03400 
03401 static VALUE
03402 gc_count(VALUE self)
03403 {
03404     return UINT2NUM((&rb_objspace)->count);
03405 }
03406 
03407 /*
03408  *  call-seq:
03409  *     GC.stat -> Hash
03410  *
03411  *  Returns a Hash containing information about the GC.
03412  *
03413  *  The hash includes internal statistics about the GC, such as:
03414  *
03415  *    {
03416  *      :count          => 18,
03417  *      :heap_used      => 77,
03418  *      :heap_length    => 77,
03419  *      :heap_increment => 0,
03420  *      :heap_live_num  => 23287,
03421  *      :heap_free_num  => 8115,
03422  *      :heap_final_num => 0,
03423  *    }
03424  *
03425  *  The contents of the hash are implementation defined and may be changed in
03426  *  the future.
03427  *
03428  *  This method is only expected to work on C Ruby.
03429  *
03430  */
03431 
03432 static VALUE
03433 gc_stat(int argc, VALUE *argv, VALUE self)
03434 {
03435     rb_objspace_t *objspace = &rb_objspace;
03436     VALUE hash;
03437 
03438     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03439         if (TYPE(hash) != T_HASH)
03440             rb_raise(rb_eTypeError, "non-hash given");
03441     }
03442 
03443     if (hash == Qnil) {
03444         hash = rb_hash_new();
03445     }
03446 
03447     rest_sweep(objspace);
03448 
03449     rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count));
03450 
03451     /* implementation dependent counters */
03452     rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used));
03453     rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length));
03454     rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment));
03455     rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num));
03456     rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num));
03457     rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num));
03458     return hash;
03459 }
03460 
03461 
03462 #if CALC_EXACT_MALLOC_SIZE
03463 /*
03464  *  call-seq:
03465  *     GC.malloc_allocated_size -> Integer
03466  *
03467  *  The total size currently allocated by malloc().
03468  *
03469  *  It returns the number of bytes currently allocated by malloc().
03470  */
03471 
03472 static VALUE
03473 gc_malloc_allocated_size(VALUE self)
03474 {
03475     return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
03476 }
03477 
03478 /*
03479  *  call-seq:
03480  *     GC.malloc_allocations -> Integer
03481  *
03482  *  The number of memory objects currently allocated by malloc().
03483  *
03484  *  It returns the number of outstanding malloc() allocations.
03485  */
03486 
03487 static VALUE
03488 gc_malloc_allocations(VALUE self)
03489 {
03490     return UINT2NUM((&rb_objspace)->malloc_params.allocations);
03491 }
03492 #endif
03493 
03494 static VALUE
03495 gc_profile_record_get(void)
03496 {
03497     VALUE prof;
03498     VALUE gc_profile = rb_ary_new();
03499     size_t i;
03500     rb_objspace_t *objspace = (&rb_objspace);
03501 
03502     if (!objspace->profile.run) {
03503         return Qnil;
03504     }
03505 
03506     for (i = 0; i < objspace->profile.count; i++) {
03507         prof = rb_hash_new();
03508         rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
03509         rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
03510         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
03511         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
03512         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
03513         rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
03514 #if GC_PROFILE_MORE_DETAIL
03515         rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
03516         rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
03517         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
03518         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
03519         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
03520         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
03521         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
03522         rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
03523 #endif
03524         rb_ary_push(gc_profile, prof);
03525     }
03526 
03527     return gc_profile;
03528 }
03529 
03530 /*
03531  *  call-seq:
03532  *     GC::Profiler.result -> String
03533  *
03534  *  Returns a profile data report such as:
03535  *
03536  *    GC 1 invokes.
03537  *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)
03538  *        1               0.012               159240               212940                10647         0.00000000000001530000
03539  */
03540 
03541 static VALUE
03542 gc_profile_result(void)
03543 {
03544     rb_objspace_t *objspace = &rb_objspace;
03545     VALUE record;
03546     VALUE result;
03547     int i, index;
03548 
03549     record = gc_profile_record_get();
03550     if (objspace->profile.run && objspace->profile.count) {
03551         result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
03552         index = 1;
03553         rb_str_cat2(result, "Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n");
03554         for (i = 0; i < (int)RARRAY_LEN(record); i++) {
03555             VALUE r = RARRAY_PTR(record)[i];
03556 #if !GC_PROFILE_MORE_DETAIL
03557             if (rb_hash_aref(r, ID2SYM(rb_intern("GC_IS_MARKED")))) {
03558 #endif
03559             rb_str_catf(result, "%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
03560                         index++, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
03561                         (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
03562                         (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
03563                         (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
03564                         NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
03565 #if !GC_PROFILE_MORE_DETAIL
03566             }
03567 #endif
03568         }
03569 #if GC_PROFILE_MORE_DETAIL
03570         rb_str_cat2(result, "\n\n");
03571         rb_str_cat2(result, "More detail.\n");
03572         rb_str_cat2(result, "Index Allocate Increase    Allocate Limit  Use Slot  Have Finalize             Mark Time(ms)            Sweep Time(ms)\n");
03573         index = 1;
03574         for (i = 0; i < (int)RARRAY_LEN(record); i++) {
03575             VALUE r = RARRAY_PTR(record)[i];
03576             rb_str_catf(result, "%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
03577                         index++, (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
03578                         (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
03579                         (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
03580                         rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE"))) ? "true" : "false",
03581                         NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
03582                         NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
03583         }
03584 #endif
03585     }
03586     else {
03587         result = rb_str_new2("");
03588     }
03589     return result;
03590 }
03591 
03592 
03593 /*
03594  *  call-seq:
03595  *     GC::Profiler.report
03596  *     GC::Profiler.report io
03597  *
03598  *  Writes the GC::Profiler.result report to <tt>$stdout</tt> or the given IO object.
03599  *
03600  */
03601 
03602 static VALUE
03603 gc_profile_report(int argc, VALUE *argv, VALUE self)
03604 {
03605     VALUE out;
03606 
03607     if (argc == 0) {
03608         out = rb_stdout;
03609     }
03610     else {
03611         rb_scan_args(argc, argv, "01", &out);
03612     }
03613     rb_io_write(out, gc_profile_result());
03614 
03615     return Qnil;
03616 }
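/* Editorial sketch (not part of gc.c): the "01" spec above tells
 * rb_scan_args to accept zero mandatory and one optional argument, and
 * rb_scan_args sets an omitted optional argument to Qnil. The argc == 0
 * branch could therefore also be written with a single scan plus a
 * NIL_P check, as in this hypothetical variant (demo_report is an
 * illustrative name, compiled out via #if 0):
 */
#if 0
static VALUE
demo_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    rb_scan_args(argc, argv, "01", &out);   /* out == Qnil when omitted */
    if (NIL_P(out)) out = rb_stdout;        /* default to $stdout */
    rb_io_write(out, gc_profile_result());
    return Qnil;
}
#endif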
03617 
03618 /*
03619  *  call-seq:
03620  *     GC::Profiler.total_time -> float
03621  *
03622  *  The total time used for garbage collection in seconds
03623  */
03624 
03625 static VALUE
03626 gc_profile_total_time(VALUE self)
03627 {
03628     double time = 0;
03629     rb_objspace_t *objspace = &rb_objspace;
03630     size_t i;
03631 
03632     if (objspace->profile.run && objspace->profile.count) {
03633         for (i = 0; i < objspace->profile.count; i++) {
03634             time += objspace->profile.record[i].gc_time;
03635         }
03636     }
03637     return DBL2NUM(time);
03638 }
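/* Editorial example (hypothetical session; the Float returned will vary
 * from run to run):
 *
 *    GC::Profiler.enable
 *    GC.start
 *    GC::Profiler.total_time  #=> 0.0009  (seconds)
 */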
03639 
03640 /*  Document-class: GC::Profiler
03641  *
03642  *  The GC profiler provides access to information on GC runs, including
03643  *  time, length, and object space size.
03644  *
03645  *  Example:
03646  *
03647  *    GC::Profiler.enable
03648  *
03649  *    require 'rdoc/rdoc'
03650  *
03651  *    puts GC::Profiler.result
03652  *
03653  *    GC::Profiler.disable
03654  *
03655  *  See also GC.count, GC.malloc_allocated_size, and GC.malloc_allocations.
03656  */
03657 
03658 /*
03659  *  The <code>GC</code> module provides an interface to Ruby's mark-and-sweep
03660  *  garbage collection mechanism. Some of the underlying methods
03661  *  are also available via the ObjectSpace module.
03662  *
03663  *  You may obtain information about the operation of the GC through
03664  *  GC::Profiler.
03665  */
03666 
03667 void
03668 Init_GC(void)
03669 {
03670     VALUE rb_mObSpace;
03671     VALUE rb_mProfiler;
03672 
03673     rb_mGC = rb_define_module("GC");
03674     rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
03675     rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
03676     rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
03677     rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
03678     rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
03679     rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
03680     rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
03681     rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
03682 
03683     rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
03684     rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
03685     rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
03686     rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
03687     rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
03688     rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
03689     rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
03690     rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
03691 
03692     rb_mObSpace = rb_define_module("ObjectSpace");
03693     rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
03694     rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
03695 
03696     rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
03697     rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
03698 
03699     rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
03700 
03701     nomem_error = rb_exc_new3(rb_eNoMemError,
03702                               rb_obj_freeze(rb_str_new2("failed to allocate memory")));
03703     OBJ_TAINT(nomem_error);
03704     OBJ_FREEZE(nomem_error);
03705 
03706     rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
03707     rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
03708 
03709     rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);
03710 
03711 #if CALC_EXACT_MALLOC_SIZE
03712     rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
03713     rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
03714 #endif
03715 }
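/* Editorial sketch (not part of gc.c): Init_GC above follows the standard
 * C-extension registration pattern -- create a module, then attach
 * functions with an explicit arity. A minimal, self-contained example of
 * the same calls (Demo, demo_hello and Init_demo are made-up names,
 * compiled out via #if 0); after loading such an extension, Ruby code
 * could call Demo.hello.
 */
#if 0
#include "ruby/ruby.h"

static VALUE
demo_hello(VALUE self)
{
    return rb_str_new2("hello");    /* arity 0: only the receiver is passed */
}

void
Init_demo(void)
{
    VALUE rb_mDemo = rb_define_module("Demo");
    rb_define_singleton_method(rb_mDemo, "hello", demo_hello, 0);
}
#endif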
03716