Ruby 1.9.3p448 (2013-06-27 revision 41675)
vm.c
Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   vm.c -
00004 
00005   $Author: usa $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016 
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021 
00022 #include "vm_insnhelper.h"
00023 #include "vm_insnhelper.c"
00024 #include "vm_exec.h"
00025 #include "vm_exec.c"
00026 
00027 #include "vm_method.c"
00028 #include "vm_eval.c"
00029 
00030 #include <assert.h>
00031 
#define BUFSIZE 0x100
#define PROCDEBUG 0   /* set non-zero to dump envs when Procs/Bindings are made */

/* Core class/module handles; assigned during VM boot (not shown in this chunk). */
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;               /* hidden wrapper class for heap-allocated envs ("VM/env") */
VALUE rb_mRubyVMFrozenCore;

/* How many times const_missing has been dispatched (see rb_vm_inc_const_missing_count). */
VALUE ruby_vm_const_missing_count = 0;

/* One flag per basic operation (BOP_*): non-zero once the corresponding
 * optimized method (e.g. Fixnum#+) has been redefined — see
 * rb_vm_check_redefinition_opt_method() below. */
char ruby_vm_redefined_flag[BOP_LAST_];

rb_thread_t *ruby_current_thread = 0;  /* currently running thread */
rb_vm_t *ruby_current_vm = 0;          /* the VM instance */

static void thread_free(void *ptr);

/* instruction-analysis hooks (definitions not in this chunk) */
void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);
00052 
00053 /*
00054  * TODO: replace with better interface at the next release.
00055  *
00056  * these functions are exported just as a workaround for ruby-debug
00057  * for the time being.
00058  */
00059 RUBY_FUNC_EXPORTED VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
00060 RUBY_FUNC_EXPORTED int rb_vm_get_sourceline(const rb_control_frame_t *cfp);
00061 
/* Bump the global VM state version (INC_VM_STATE_VERSION), which
 * presumably invalidates version-tagged caches — see vm_core.h. */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00067 
00068 static void vm_clear_global_method_cache(void);
00069 
/* Invalidate every inline method cache in every iseq.
 * Currently a deliberate no-op — see the TODO below. */
static void
vm_clear_all_inline_method_cache(void)
{
    /* TODO: Clear all inline cache entries in all iseqs.
             How to iterate all iseqs in sweep phase?
             rb_objspace_each_objects() doesn't work at sweep phase.
     */
}
00078 
00079 static void
00080 vm_clear_all_cache()
00081 {
00082     vm_clear_global_method_cache();
00083     vm_clear_all_inline_method_cache();
00084     ruby_vm_global_state_version = 1;
00085 }
00086 
00087 void
00088 rb_vm_inc_const_missing_count(void)
00089 {
00090     ruby_vm_const_missing_count +=1;
00091 }
00092 
00093 /* control stack frame */
00094 
/* Push a FINISH frame on th's control-frame stack and point its pc at
 * the single "finish" instruction sequence.  Callers push this before
 * a TOP/EVAL/block frame (see the "for return" comments below) so the
 * VM has a frame to return into when that code finishes. */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00104 
/* Prepare th to execute a toplevel instruction sequence: verify that
 * iseqval wraps an ISEQ_TYPE_TOP iseq (TypeError otherwise), push a
 * FINISH frame, then push the TOP frame with th->top_self as self. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00124 
/* Prepare th to execute an EVAL iseq in the context of th->base_block:
 * push a FINISH frame, then an EVAL frame that reuses the block's
 * self/lfp/dfp.  A non-NULL cref is stored in the dfp[-1] special slot
 * (the slot vm_get_cref() reads — cf. rb_vm_cref() below). */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00144 
/* Prepare th to run the main script: evaluate it as an EVAL frame
 * inside TOPLEVEL_BINDING's environment, then refresh the binding's
 * env so locals defined at the top level stay visible through
 * TOPLEVEL_BINDING. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    /* NOTE(review): bind was already dereferenced via GetEnvPtr(bind->env)
     * above, so the "bind &&" guard below is never the deciding test. */
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00167 
00168 rb_control_frame_t *
00169 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00170 {
00171     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00172         if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00173             return cfp;
00174         }
00175         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00176     }
00177     return 0;
00178 }
00179 
/* Find the Ruby-level caller frame for cfp.  If cfp itself is a normal
 * iseq frame it is returned directly; otherwise the search walks older
 * frames, but only continues past frames carrying VM_FRAME_FLAG_PASSED
 * — any other non-iseq frame aborts the search.  Returns 0 when no
 * such frame exists. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00201 
00202 /* at exit */
00203 
/* Register func to run at VM destruction.  The raw function pointer is
 * stored as an element of the vm->at_exit array (cast to VALUE); it is
 * popped and called by ruby_vm_run_at_exit_hooks() below. */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00209 
/* Pop and invoke every hook registered via ruby_vm_at_exit(), most
 * recently registered first, then free the backing array. */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        /* elements are function pointers smuggled in as VALUEs */
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}
00222 
/* Env */

/*
  env{
    env[0] // special (block or prev env)
    env[1] // env object
    env[2] // prev env val
  };
 */

/* True when env does NOT point into th's value stack, i.e. the
 * environment has already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env)  \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
/* The env object (VALUE) stored one slot above the env pointer. */
#define ENV_VAL(env)        ((env)[1])
00236 
/* GC mark function for env objects: marks the copied local-variable
 * slots, the previous env, and the embedded block's self/proc/iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* TODO: should mark more restricted range */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may be an AST node (ifunc) instead of an iseq */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00266 
/* GC free function for env objects: releases the copied local-variable
 * array, then the struct itself. */
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        rb_env_t *const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
00278 
00279 static size_t
00280 env_memsize(const void *ptr)
00281 {
00282     if (ptr) {
00283         const rb_env_t * const env = ptr;
00284         size_t size = sizeof(rb_env_t);
00285         if (env->env) {
00286             size += env->env_size * sizeof(VALUE);
00287         }
00288         return size;
00289     }
00290     return 0;
00291 }
00292 
/* TypedData descriptor tying env objects to their mark/free/memsize hooks. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00297 
/* Allocate a fresh, empty env object (fields zeroed so mark/free are
 * safe before the env is populated by vm_make_env_each). */
static VALUE
env_alloc(void)
{
    VALUE obj;
    rb_env_t *env;
    obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
    env->env = 0;
    env->prev_envval = 0;
    env->block.iseq = 0;
    return obj;
}
00309 
00310 static VALUE check_env_value(VALUE envval);
00311 
/* Debugging helper (used when PROCDEBUG): dump the slots around an
 * env's dfp and recurse into the previous env if present.  Always
 * returns 1; check_env_value() treats that as "valid". */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp:    %10p\n", (void *)env->block.lfp);
    printf("dfp:    %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00332 
00333 static VALUE
00334 check_env_value(VALUE envval)
00335 {
00336     rb_env_t *env;
00337     GetEnvPtr(envval, env);
00338 
00339     if (check_env(env)) {
00340         return envval;
00341     }
00342     rb_bug("invalid env");
00343     return Qnil;                /* unreachable */
00344 }
00345 
/* Move the environment designated by envptr (a dfp slot on th's value
 * stack) into a heap-allocated env object, first recursing into every
 * enclosing environment up to endptr (the lfp).  Returns the env
 * object.  On return the stack slot *envptr holds the env object (so
 * the GC keeps it alive) and cfp->dfp — plus cfp->lfp at the outermost
 * level — point into the heap copy. */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already heapified: just return the stored env object */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* there is an enclosing environment: heapify it first */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* locate the (older) control frame that owns the parent dfp */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* locals .. dfp slot itself, plus 2 trailing bookkeeping slots */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    /* copy local_size+1 slots (locals and the dfp slot) off the stack */
    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;           /* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;        /* frame self */
    nenvptr[2] = penvval;       /* frame prev env object */

    /* reset lfp/dfp in cfp */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* as Binding */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* TODO */
        env->block.iseq = 0;
    }
    return envval;
}
00430 
00431 static int
00432 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00433 {
00434     int i;
00435     if (!iseq) return 0;
00436     for (i = 0; i < iseq->local_table_size; i++) {
00437         ID lid = iseq->local_table[i];
00438         if (rb_is_local_id(lid)) {
00439             rb_ary_push(ary, ID2SYM(lid));
00440         }
00441     }
00442     return 1;
00443 }
00444 
00445 static int
00446 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00447 {
00448 
00449     while (collect_local_variables_in_iseq(env->block.iseq, ary),
00450            env->prev_envval) {
00451         GetEnvPtr(env->prev_envval, env);
00452     }
00453     return 0;
00454 }
00455 
00456 static int
00457 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00458 {
00459     if (ENV_IN_HEAP_P(th, dfp)) {
00460         rb_env_t *env;
00461         GetEnvPtr(ENV_VAL(dfp), env);
00462         collect_local_variables_in_env(env, ary);
00463         return 1;
00464     }
00465     else {
00466         return 0;
00467     }
00468 }
00469 
00470 static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
00471 static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);
00472 
/* Public wrapper for vm_make_env_object() that discards the block-proc
 * out-parameter (exported as a workaround for ruby-debug; see above). */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}
00479 
/* Heapify cfp's environment (dfp..lfp) and return the env object.
 * If the frame carries an un-materialized block, a Proc is created for
 * it first, lfp[0] is repointed at the Proc's block, and the Proc is
 * returned through *blockprocptr so the caller can keep it alive. */
static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lfp;
    rb_block_t *blockptr;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* for method_missing */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    lfp = cfp->lfp;
    blockptr = GC_GUARDED_PTR_REF(lfp[0]);

    /* bit 0x02 in lfp[0] appears to mark a non-proc-able block slot —
     * TODO(review): confirm against vm_core.h */
    if (blockptr && !(lfp[0] & 0x02)) {
        VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
        rb_proc_t *p;
        GetProcPtr(blockprocval, p);
        lfp[0] = GC_GUARDED_PTR(&p->block);
        *blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);
    /* any throw-object catch points that referenced the old stack dfp
     * must be redirected into the heap copy */
    rb_vm_rewrite_dfp_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00512 
/* Scan th's frame stack for RESCUE/ENSURE frames whose errinfo slot
 * (dfp[-2], i.e. $!) holds a throw object, and rewrite that object's
 * catch-point dfp to the heap env when the original stack slot has
 * already been replaced by an env object. */
void
rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* rewrite dfp in errinfo to point to heap */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->dfp[-2]; /* #$! */
            if (RB_TYPE_P(errinfo, T_NODE)) {
                VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_dfp)) {
                    VALUE dfpval = *escape_dfp;
                    /* the stack slot now holds the env object (set by
                     * vm_make_env_each) — redirect into its env array */
                    if (CLASS_OF(dfpval) == rb_cEnv) {
                        rb_env_t *dfpenv;
                        GetEnvPtr(dfpval, dfpenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(dfpenv->env + dfpenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00538 
00539 void
00540 rb_vm_stack_to_heap(rb_thread_t *th)
00541 {
00542     rb_control_frame_t *cfp = th->cfp;
00543     while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
00544         rb_vm_make_env_object(th, cfp);
00545         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00546     }
00547 }
00548 
00549 /* Proc */
00550 
00551 static VALUE
00552 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00553 {
00554     if (!block->proc) {
00555         block->proc = rb_vm_make_proc(th, block, rb_cProc);
00556     }
00557     return block->proc;
00558 }
00559 
/* Create a Proc object of class klass from block.  The owning frame's
 * environment is heapified first so the Proc outlives the stack frame;
 * block must not already have a Proc (rb_bug otherwise).  The new
 * Proc inherits the thread's current $SAFE level. */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* after heapification the block must not point into the value stack */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00598 
00599 /* C -> Ruby: block */
00600 
/* Invoke block with argc/argv on behalf of C code and return its value.
 * - A special-const iseq means an empty block: returns Qnil.
 * - A T_NODE iseq (C-implemented block) goes to vm_yield_with_cfunc().
 * - Otherwise a BLOCK or LAMBDA frame is pushed (depending on whether
 *   the block's Proc is a lambda) and executed via vm_exec().
 * blockptr is the block passed on *to* the invoked block; a non-NULL
 * cref is installed in the new frame's dfp[-1] special slot. */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* copy arguments onto the value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        /* lambda gets strict (method-style) argument checking */
        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00646 
00647 static inline const rb_block_t *
00648 check_block(rb_thread_t *th)
00649 {
00650     const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
00651 
00652     if (blockptr == 0) {
00653         rb_vm_localjump_error("no block given", Qnil, 0);
00654     }
00655 
00656     return blockptr;
00657 }
00658 
/* Yield argc/argv to the current frame's block, installing cref in the
 * new frame (used by eval-like callers). */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
00665 
/* Yield argc/argv to the current frame's block (plain `yield`). */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
00672 
/* Invoke proc with the given self, arguments, and (optional) block.
 * For procs not created from a method, the thread's $SAFE level is
 * switched to the proc's saved level for the duration of the call and
 * restored afterwards; non-local exits caught by the tag are restored
 * first and then re-thrown via JUMP_TAG. */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;  /* volatile: survives longjmp */

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);
    }
    return val;
}
00699 
00700 /* special variable */
00701 
/* Skip frames with no pc (i.e. non-executing frames) and return the
 * nearest enclosing normal frame, or 0 when the stack bottom is hit. */
static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
            return 0;
        }
    }
    return cfp;
}
00713 
/* Read special variable `key` from the normal frame enclosing cfp. */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
00720 
/* Write special variable `key` in the normal frame enclosing cfp. */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
00727 
/* Read special variable `key` for the current thread's current frame
 * (key 0 => $_, key 1 => $~; see rb_lastline_get/rb_backref_get). */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00734 
/* Write special variable `key` for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00741 
/* Get $~ (last MatchData) for the current frame. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00747 
/* Set $~ (last MatchData) for the current frame. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00753 
/* Get $_ (last read line) for the current frame. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00759 
/* Set $_ (last read line) for the current frame. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00765 
00766 /* backtrace */
00767 
/* Return the source line number for cfp's current pc, or 0 when cfp
 * has no normal iseq or no instruction-info table.
 *
 * NOTE(review): the table appears to map iseq offsets to lines; the
 * line is taken from the entry *before* the first entry whose position
 * equals pc's offset, falling back to the last entry. */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        /* pc at the very first entry: there is no preceding entry, so
         * line_no stays 0 */
        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* no exact match: use the last entry (i == insn_info_size here) */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00790 
/* Walk th's control frames from the outermost inward, skipping lev+1
 * frames counted from the innermost, and call iter once per
 * Ruby-level or C-function frame with (arg, file, line, name).
 * init, when non-NULL, runs once before iteration (used to allocate
 * the result lazily).  Returns FALSE when lev exceeds the current
 * stack depth; TRUE otherwise.  Iteration stops early when iter
 * returns non-zero. */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    cfp -= 2;  /* skip the two dummy frames at the stack top */
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;  /* requested level deeper than the stack */
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            ID id;
            extern VALUE ruby_engine_name;

            /* C frames have no file; reuse the last Ruby file seen, or
             * the engine name before any Ruby frame was visited */
            if (NIL_P(file)) file = ruby_engine_name;
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00834 
00835 static void
00836 vm_backtrace_alloc(void *arg)
00837 {
00838     VALUE *aryp = arg;
00839     *aryp = rb_ary_new();
00840 }
00841 
/* iter callback for vm_backtrace_each(): format one frame as
 * "file:line:in `name'" (line omitted when 0) and push it onto the
 * result array.  Returns 0 so iteration continues. */
static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
00859 
/* Build th's backtrace as an array of strings, skipping lev frames.
 * For lev < 0 the array is allocated eagerly (so an empty trace yields
 * []); otherwise allocation is deferred and Qnil is returned when lev
 * is deeper than the stack.  Frames are collected outermost-first and
 * reversed so the innermost frame comes first. */
static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}
00872 
00873 const char *
00874 rb_sourcefile(void)
00875 {
00876     rb_thread_t *th = GET_THREAD();
00877     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00878 
00879     if (cfp) {
00880         return RSTRING_PTR(cfp->iseq->filename);
00881     }
00882     else {
00883         return 0;
00884     }
00885 }
00886 
00887 int
00888 rb_sourceline(void)
00889 {
00890     rb_thread_t *th = GET_THREAD();
00891     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00892 
00893     if (cfp) {
00894         return rb_vm_get_sourceline(cfp);
00895     }
00896     else {
00897         return 0;
00898     }
00899 }
00900 
/* Return the cref (lexical class/module scope chain) of the nearest
 * Ruby-level frame; raises RuntimeError when there is none. */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00912 
#if 0
/* Disabled debugging helper: dump each cref's class and visibility
 * along the cref chain. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00924 
/* Return the cbase (innermost lexically enclosing class/module) of the
 * nearest Ruby-level frame; raises RuntimeError when there is none. */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
00936 
00937 /* jump */
00938 
/* Build (but do not raise) a LocalJumpError with @exit_value set to
 * value and @reason set to a symbol derived from the TAG_* reason. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00970 
00971 void
00972 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00973 {
00974     VALUE exc = make_localjump_error(mesg, value, reason);
00975     rb_exc_raise(exc);
00976 }
00977 
/* Convert a non-local jump state (TAG_*) into a LocalJumpError object,
 * or return Qnil for state 0 and unrecognized states.  A val of Qundef
 * means "use the current tag's return value". */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
01009 
/* Raise state as a LocalJumpError when it maps to one (and val is not
 * Qnil — NOTE(review): quirky guard, intent unclear), otherwise
 * re-throw the raw tag via JUMP_TAG. */
void
rb_vm_jump_tag_but_local_jump(int state, VALUE val)
{
    if (val != Qnil) {
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
        if (!NIL_P(exc)) rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}
01019 
NORETURN(static void vm_iter_break(rb_thread_t *th));

/* Perform `break` out of the current iterator block: build a throw
 * object whose catch point is the dfp stored in the current frame's
 * dfp[0] slot, then longjmp with TAG_BREAK. */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01032 
/* Public API: break out of the current iterator block. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
01038 
01039 /* optimization: redefine management */
01040 
/* Maps tracked optimized-method entries (rb_method_entry_t*) to their
 * BOP_* index; populated by vm_init_redefined_flag() and consulted by
 * rb_vm_check_redefinition_opt_method(). */
static st_table *vm_opt_method_table = 0;
01042 
/* If me is one of the tracked optimized C methods, mark its basic
 * operation as redefined so the VM stops using the specialized path.
 * (Presumably invoked from method-definition code in vm_method.c.) */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            ruby_vm_redefined_flag[bop] = 1;
        }
    }
}
01053 
/* Record klass#mid — which must currently be a C function — in
 * vm_opt_method_table under basic-operation index bop.  rb_bug when
 * the method is missing or not a cfunc (setup-time invariant). */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01066 
/* Register every "basic operation" method (Fixnum#+, String#[], ...)
 * in vm_opt_method_table and clear its redefined flag, so that a later
 * redefinition can be detected by rb_vm_check_redefinition_opt_method()
 * and the corresponding specialized instruction disabled. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

/* OP(mid_, bop_): select operator id / BOP index, clear its flag.
 * C(k): register rb_c<k>'s current C method entry for that BOP. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01097 
01098 /* for vm development */
01099 
01100 #if VMDEBUG
/* Debug helper (VMDEBUG only): human-readable name of a control
 * frame's type magic. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_PROC:   return "proc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
01119 #endif
01120 
01121 /* evaluator body */
01122 
01123 /*                  finish
01124   VMe (h1)          finish
01125     VM              finish F1 F2
01126       cfunc         finish F1 F2 C1
01127         rb_funcall  finish F1 F2 C1
01128           VMe       finish F1 F2 C1
01129             VM      finish F1 F2 C1 F3
01130 
01131   F1 - F3 : pushed by VM
01132   C1      : pushed by send insn (CFUNC)
01133 
01134   struct CONTROL_FRAME {
01135     VALUE *pc;                  // cfp[0], program counter
01136     VALUE *sp;                  // cfp[1], stack pointer
01137     VALUE *bp;                  // cfp[2], base pointer
01138     rb_iseq_t *iseq;            // cfp[3], iseq
01139     VALUE flag;                 // cfp[4], magic
01140     VALUE self;                 // cfp[5], self
01141     VALUE *lfp;                 // cfp[6], local frame pointer
01142     VALUE *dfp;                 // cfp[7], dynamic frame pointer
01143     rb_iseq_t * block_iseq;     // cfp[8], block iseq
01144     VALUE proc;                 // cfp[9], always 0
01145   };
01146 
01147   struct BLOCK {
01148     VALUE self;
01149     VALUE *lfp;
01150     VALUE *dfp;
01151     rb_iseq_t *block_iseq;
01152     VALUE proc;
01153   };
01154 
01155   struct METHOD_CONTROL_FRAME {
01156     rb_control_frame_t frame;
01157   };
01158 
01159   struct METHOD_FRAME {
01160     VALUE arg0;
01161     ...
01162     VALUE argM;
01163     VALUE param0;
01164     ...
01165     VALUE paramN;
01166     VALUE cref;
01167     VALUE special;                         // lfp [1]
01168     struct block_object *block_ptr | 0x01; // lfp [0]
01169   };
01170 
01171   struct BLOCK_CONTROL_FRAME {
01172     rb_control_frame_t frame;
01173   };
01174 
01175   struct BLOCK_FRAME {
01176     VALUE arg0;
01177     ...
01178     VALUE argM;
01179     VALUE param0;
01180     ...
01181     VALUE paramN;
01182     VALUE cref;
01183     VALUE *(prev_ptr | 0x01); // DFP[0]
01184   };
01185 
01186   struct CLASS_CONTROL_FRAME {
01187     rb_control_frame_t frame;
01188   };
01189 
01190   struct CLASS_FRAME {
01191     VALUE param0;
01192     ...
01193     VALUE paramN;
01194     VALUE cref;
01195     VALUE prev_dfp; // for frame jump
01196   };
01197 
01198   struct C_METHOD_CONTROL_FRAME {
01199     VALUE *pc;                       // 0
01200     VALUE *sp;                       // stack pointer
01201     VALUE *bp;                       // base pointer (used in exception)
01202     rb_iseq_t *iseq;               // cmi
01203     VALUE magic;                     // C_METHOD_FRAME
01204     VALUE self;                      // ?
01205     VALUE *lfp;                      // lfp
01206     VALUE *dfp;                      // == lfp
01207     rb_iseq_t * block_iseq;        //
01208     VALUE proc;                      // always 0
01209   };
01210 
01211   struct C_BLOCK_CONTROL_FRAME {
01212     VALUE *pc;                       // point only "finish" insn
01213     VALUE *sp;                       // sp
01214     rb_iseq_t *iseq;               // ?
01215     VALUE magic;                     // C_METHOD_FRAME
01216     VALUE self;                      // needed?
01217     VALUE *lfp;                      // lfp
01218     VALUE *dfp;                      // lfp
01219     rb_iseq_t * block_iseq; // 0
01220   };
01221  */
01222 
01223 
/* The outer evaluator loop: run vm_exec_core() under a setjmp tag and
 * handle any non-local transfer (raise, break, return, next, redo,
 * retry) that escapes it by searching the catch tables of the frames on
 * the VM stack.  When a handler (rescue/ensure/retry/break/redo/next
 * entry) is found, a block frame for the catch iseq is pushed and the
 * core loop restarts; otherwise frames are popped one by one until a
 * FINISH frame is reached and the tag is re-thrown to the caller. */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        /* th->state != 0 means vm_exec_core returned via a throw
         * object rather than a normal value. */
        if ((state = th->state) != 0) {
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        /* Skip frames with no pc/iseq (e.g. C function frames); fire
         * c-return events for cfunc frames being unwound. */
        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        /* epc: offset of the raising pc within this iseq, used to match
         * catch table entries. */
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            /* This frame is the throw object's target. */
            if (cfp->dfp == escape_dfp) {
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        /* Not at a finish frame yet: retarget the throw
                         * at the caller frame and continue as a break. */
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        /* At the boundary: only ensure clauses may still
                         * need to run before returning the value. */
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            th->cfp += 2;
                            goto finish_vme;
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* Exceptions can be caught by rescue or ensure entries. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_dfp;
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            /* Restart at the begin body. */
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            /* Shared search for break/redo/next restart entries;
             * ensure entries always take precedence. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        /* break/next push the thrown value; redo does not. */
                        if (state != TAG_REDO) {
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->state = 0;
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* Any other tag: only ensure clauses run in this frame. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* found catch table */
            rb_iseq_t *catch_iseq;

            /* enter catch scope */
            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* skip frame */

            /* Fire return/end events for the frame being abandoned. */
            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
                break;
            }

            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
                goto exception_handler;
            }
            else {
                /* Reached the frame that entered this vm_exec():
                 * propagate the tag to the surrounding handler. */
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01452 
01453 /* misc */
01454 
01455 VALUE
01456 rb_iseq_eval(VALUE iseqval)
01457 {
01458     rb_thread_t *th = GET_THREAD();
01459     VALUE val;
01460     volatile VALUE tmp;
01461 
01462     vm_set_top_stack(th, iseqval);
01463 
01464     val = vm_exec(th);
01465     tmp = iseqval; /* prohibit tail call optimization */
01466     return val;
01467 }
01468 
01469 VALUE
01470 rb_iseq_eval_main(VALUE iseqval)
01471 {
01472     rb_thread_t *th = GET_THREAD();
01473     VALUE val;
01474     volatile VALUE tmp;
01475 
01476     vm_set_main_stack(th, iseqval);
01477 
01478     val = vm_exec(th);
01479     tmp = iseqval; /* prohibit tail call optimization */
01480     return val;
01481 }
01482 
01483 int
01484 rb_thread_method_id_and_class(rb_thread_t *th,
01485                               ID *idp, VALUE *klassp)
01486 {
01487     rb_control_frame_t *cfp = th->cfp;
01488     rb_iseq_t *iseq = cfp->iseq;
01489     if (!iseq && cfp->me) {
01490         if (idp) *idp = cfp->me->def->original_id;
01491         if (klassp) *klassp = cfp->me->klass;
01492         return 1;
01493     }
01494     while (iseq) {
01495         if (RUBY_VM_IFUNC_P(iseq)) {
01496             if (idp) CONST_ID(*idp, "<ifunc>");
01497             if (klassp) *klassp = 0;
01498             return 1;
01499         }
01500         if (iseq->defined_method_id) {
01501             if (idp) *idp = iseq->defined_method_id;
01502             if (klassp) *klassp = iseq->klass;
01503             return 1;
01504         }
01505         if (iseq->local_iseq == iseq) {
01506             break;
01507         }
01508         iseq = iseq->parent_iseq;
01509     }
01510     return 0;
01511 }
01512 
01513 int
01514 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
01515 {
01516     return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
01517 }
01518 
01519 VALUE
01520 rb_thread_current_status(const rb_thread_t *th)
01521 {
01522     const rb_control_frame_t *cfp = th->cfp;
01523     VALUE str = Qnil;
01524 
01525     if (cfp->iseq != 0) {
01526         if (cfp->pc != 0) {
01527             rb_iseq_t *iseq = cfp->iseq;
01528             int line_no = rb_vm_get_sourceline(cfp);
01529             char *file = RSTRING_PTR(iseq->filename);
01530             str = rb_sprintf("%s:%d:in `%s'",
01531                              file, line_no, RSTRING_PTR(iseq->name));
01532         }
01533     }
01534     else if (cfp->me->def->original_id) {
01535         str = rb_sprintf("`%s#%s' (cfunc)",
01536                          rb_class2name(cfp->me->klass),
01537                          rb_id2name(cfp->me->def->original_id));
01538     }
01539 
01540     return str;
01541 }
01542 
/* Call `func(arg)` under a freshly pushed VM_FRAME_MAGIC_TOP frame so
 * VM primitives invoked from the C function see a well-formed top-level
 * control frame (with `recv` as self and an optional block).  The frame
 * is popped before returning the function's result. */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* volatile: keeps iseqval alive across the call for the GC. */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01560 
01561 /* vm */
01562 
01563 static int
01564 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01565 {
01566     VALUE thval = (VALUE)key;
01567     rb_gc_mark(thval);
01568     return ST_CONTINUE;
01569 }
01570 
01571 static void
01572 mark_event_hooks(rb_event_hook_t *hook)
01573 {
01574     while (hook) {
01575         rb_gc_mark(hook->data);
01576         hook = hook->next;
01577     }
01578 }
01579 
01580 void
01581 rb_vm_mark(void *ptr)
01582 {
01583     int i;
01584 
01585     RUBY_MARK_ENTER("vm");
01586     RUBY_GC_INFO("-------------------------------------------------\n");
01587     if (ptr) {
01588         rb_vm_t *vm = ptr;
01589         if (vm->living_threads) {
01590             st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
01591         }
01592         RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
01593         RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
01594         RUBY_MARK_UNLESS_NULL(vm->load_path);
01595         RUBY_MARK_UNLESS_NULL(vm->loaded_features);
01596         RUBY_MARK_UNLESS_NULL(vm->top_self);
01597         RUBY_MARK_UNLESS_NULL(vm->coverages);
01598         rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);
01599 
01600         if (vm->loading_table) {
01601             rb_mark_tbl(vm->loading_table);
01602         }
01603 
01604         mark_event_hooks(vm->event_hooks);
01605 
01606         for (i = 0; i < RUBY_NSIG; i++) {
01607             if (vm->trap_list[i].cmd)
01608                 rb_gc_mark(vm->trap_list[i].cmd);
01609         }
01610     }
01611 
01612     RUBY_MARK_LEAVE("vm");
01613 }
01614 
01615 #define vm_free 0
01616 
/* Tear down a VM: free the main thread, the living-threads table, the
 * object space (when per-VM object spaces are enabled), run at_exit
 * hooks, destroy the GVL and release the rb_vm_t itself.  Order
 * matters: the thread is freed before its containing VM. */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        /* The VM wrapper object must not survive this call. */
        rb_gc_force_recycle(vm->self);
        vm->main_thread = 0;
        if (th) {
            rb_fiber_reset_root_local_storage(th->self);
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
        ruby_vm_run_at_exit_hooks(vm);
        rb_vm_gvl_destroy(vm);
        ruby_xfree(vm);
        ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01649 
01650 static size_t
01651 vm_memsize(const void *ptr)
01652 {
01653     if (ptr) {
01654         const rb_vm_t *vmobj = ptr;
01655         return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01656     }
01657     else {
01658         return 0;
01659     }
01660 }
01661 
/* Typed-data descriptor for the VM wrapper object (mark / free /
 * memsize; vm_free is 0, so the VM struct is never freed by GC). */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01666 
/* Zero-initialize an rb_vm_t and set the few fields whose default is
 * not zero. */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    /* at_exit is a hidden embedded array; clearing the embed-length
     * bits leaves its length at 0. */
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK; /* len set 0 */
    vm->at_exit.basic.klass = 0;
}
01675 
01676 /* Thread */
01677 
01678 #define USE_THREAD_DATA_RECYCLE 1
01679 
01680 #if USE_THREAD_DATA_RECYCLE
01681 #define RECYCLE_MAX 64
01682 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01683 static int thread_recycle_stack_count = 0;
01684 
01685 static VALUE *
01686 thread_recycle_stack(size_t size)
01687 {
01688     if (thread_recycle_stack_count) {
01689         return thread_recycle_stack_slot[--thread_recycle_stack_count];
01690     }
01691     else {
01692         return ALLOC_N(VALUE, size);
01693     }
01694 }
01695 
01696 #else
01697 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01698 #endif
01699 
/* Return a thread's VM stack to the recycle pool, or free it when the
 * pool is full (or recycling is compiled out). */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
01711 
#ifdef USE_THREAD_RECYCLE
/* Allocate a zeroed rb_thread_t.  NOTE(review): despite the name, no
 * recycling is visible here — it always allocates fresh memory. */
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
#endif
01721 
/* GC mark function for a thread: marks every live VALUE on the VM
 * stack, walks the control-frame chain marking procs/selves/iseqs and
 * flagging method entries, then marks the thread's Ruby-object fields,
 * local storage and (for non-current threads) the machine stack and
 * saved registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            /* live portion of the value stack */
            while (p < sp) {
                rb_gc_mark(*p++);
            }
            /* extra slots pinned by mark_stack_len (e.g. during calls) */
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            /* control frames grow down from the top of the stack */
            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                rb_gc_mark(cfp->self);
                if (iseq) {
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                /* method entries are flagged, not marked directly */
                if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        /* The current thread's machine stack is scanned by the GC
         * itself; only other threads need it scanned here. */
        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01784 
/* Free function for a thread: releases the VM stack (unless a root
 * fiber owns it), sanity-checks mutex state, frees local storage, and
 * frees the struct itself — except for the main thread, whose struct is
 * left for VM teardown. */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
        th = ptr;

        /* when a root fiber exists, the fiber owns the stack */
        if (!th->root_fiber) {
            RUBY_FREE_UNLESS_NULL(th->stack);
        }

        /* a dying thread must not hold or wait on any mutex */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
        }
        if (th->keeping_mutexes != NULL) {
            rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
        }

        if (th->local_storage) {
            st_free_table(th->local_storage);
        }

        if (th->vm && th->vm->main_thread == th) {
            /* main thread struct is freed with the VM, not here */
            RUBY_GC_INFO("main thread\n");
        }
        else {
#ifdef USE_SIGALTSTACK
            if (th->altstack) {
                free(th->altstack);
            }
#endif
            ruby_xfree(ptr);
        }
        if (ruby_current_thread == th)
            ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
01825 
01826 static size_t
01827 thread_memsize(const void *ptr)
01828 {
01829     if (ptr) {
01830         const rb_thread_t *th = ptr;
01831         size_t size = sizeof(rb_thread_t);
01832 
01833         if (!th->root_fiber) {
01834             size += th->stack_size * sizeof(VALUE);
01835         }
01836         if (th->local_storage) {
01837             size += st_memsize(th->local_storage);
01838         }
01839         return size;
01840     }
01841     else {
01842         return 0;
01843     }
01844 }
01845 
/* Typed-data descriptor for Thread objects (mark / free / memsize). */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
        rb_thread_mark,
        thread_free,
        thread_memsize,
    },
};
01855 
01856 VALUE
01857 rb_obj_is_thread(VALUE obj)
01858 {
01859     if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
01860         return Qtrue;
01861     }
01862     else {
01863         return Qfalse;
01864     }
01865 }
01866 
/* Allocate an uninitialized Thread object of class `klass` wrapping a
 * zeroed rb_thread_t. */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01880 
/* Initialize a thread struct: allocate its VM stack (and signal
 * altstack where enabled), push the initial TOP frame, and set the
 * runnable-state defaults. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(ALT_STACK_SIZE);
#endif
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow down from the top of the stack */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
}
01904 
/* Initialize the Thread object `self`: set up its rb_thread_t, attach
 * it to the current VM, and point it at the global top self.  Returns
 * `self`. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
01919 
01920 VALUE
01921 rb_thread_alloc(VALUE klass)
01922 {
01923     VALUE self = thread_alloc(klass);
01924     ruby_thread_init(self);
01925     return self;
01926 }
01927 
/* Implementation of `def`: bind the method iseq `iseqval` under name
 * `id` in the class determined by the cref — or in obj's singleton
 * class when `is_singleton` is non-zero.  Visibility comes from the
 * cref (public for singleton methods); module_function additionally
 * defines the method on the module's singleton class. */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    /* an iseq already bound to a class is cloned for the new binding */
    if (miseq->klass) {
        iseqval = rb_iseq_clone(iseqval, 0);
        RB_GC_GUARD(iseqval);
        GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        /* immediates (Fixnum/Symbol) have no singleton class */
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        rb_check_frozen(obj);
        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* dup */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: expose the method on the singleton class too */
    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01971 
/* Run `expr` with the current control frame temporarily popped (cfp
 * incremented), so VM helpers see the caller's frame; restores cfp
 * afterwards. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01976 
/* RubyVM::FrozenCore helper: define an instance method from an iseq,
 * evaluated in the caller's frame. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01985 
/* RubyVM::FrozenCore helper: define a singleton method from an iseq,
 * evaluated in the caller's frame. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01994 
/* RubyVM::FrozenCore helper: `alias sym1 sym2` in cbase, evaluated in
 * the caller's frame. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02003 
/* RubyVM::FrozenCore helper: alias a global variable ($new = $old),
 * evaluated in the caller's frame. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02012 
02013 static VALUE
02014 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
02015 {
02016     REWIND_CFP({
02017         rb_undef(cbase, SYM2ID(sym));
02018         INC_VM_STATE_VERSION();
02019     });
02020     return Qnil;
02021 }
02022 
/* FrozenCore primitive: register the block iseq +iseqval+ as an END{}
 * handler, to be run via rb_call_end_proc at interpreter shutdown.
 * Always returns nil. */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;

        /* END{} is only reachable from Ruby code, so a Ruby-level
         * frame must exist above us */
        if (cfp == 0) {
            rb_bug("m_core_set_postexe: unreachable");
        }

        GetISeqPtr(iseqval, blockiseq);

        /* graft the iseq onto the frame's block slot (proc cleared so
         * a fresh Proc is materialized below), then wrap it */
        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02048 
02049 extern VALUE *rb_gc_stack_start;
02050 extern size_t rb_gc_stack_maxsize;
02051 #ifdef __ia64
02052 extern VALUE *rb_gc_register_stack_start;
02053 #endif
02054 
02055 /* debug functions */
02056 
/* :nodoc:
 * RubyVM::SDR -- dump the VM state (bug-report style) to stderr.
 * Exposed as a singleton method only when built with VMDEBUG. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02064 
/* :nodoc:
 * RubyVM::NSDR -- return the current NATIVE (C-level) call stack as an
 * Array of Strings, one per frame; returns an empty Array when
 * backtrace(3) is unavailable.  Debug builds only (VMDEBUG). */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
/* NOTE(review): #include inside a function body is unusual but works
 * here because execinfo.h only declares functions. */
#define MAX_NATIVE_TRACE 1024
    /* static buffer: not reentrant / not thread-safe -- acceptable for
     * a debug-only helper */
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    /* NOTE(review): syms leaks if rb_str_new2 raises mid-loop --
     * tolerated for a debug hook */
    free(syms); /* OK */
#endif
    return ary;
}
02089 
02090 void
02091 Init_VM(void)
02092 {
02093     VALUE opts;
02094     VALUE klass;
02095     VALUE fcore;
02096 
02097     /* ::VM */
02098     rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02099     rb_undef_alloc_func(rb_cRubyVM);
02100     rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02101 
02102     /* ::VM::FrozenCore */
02103     fcore = rb_class_new(rb_cBasicObject);
02104     RBASIC(fcore)->flags = T_ICLASS;
02105     klass = rb_singleton_class(fcore);
02106     rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02107     rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02108     rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02109     rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02110     rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02111     rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02112     rb_define_method_id(klass, idProc, rb_block_proc, 0);
02113     rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
02114     rb_obj_freeze(fcore);
02115     rb_gc_register_mark_object(fcore);
02116     rb_mRubyVMFrozenCore = fcore;
02117 
02118     /* ::VM::Env */
02119     rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02120     rb_undef_alloc_func(rb_cEnv);
02121     rb_undef_method(CLASS_OF(rb_cEnv), "new");
02122 
02123     /* ::Thread */
02124     rb_cThread = rb_define_class("Thread", rb_cObject);
02125     rb_undef_alloc_func(rb_cThread);
02126 
02127     /* ::VM::USAGE_ANALYSIS_* */
02128     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02129     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02130     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02131     rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02132 
02133 #if   OPT_DIRECT_THREADED_CODE
02134     rb_ary_push(opts, rb_str_new2("direct threaded code"));
02135 #elif OPT_TOKEN_THREADED_CODE
02136     rb_ary_push(opts, rb_str_new2("token threaded code"));
02137 #elif OPT_CALL_THREADED_CODE
02138     rb_ary_push(opts, rb_str_new2("call threaded code"));
02139 #endif
02140 
02141 #if OPT_STACK_CACHING
02142     rb_ary_push(opts, rb_str_new2("stack caching"));
02143 #endif
02144 #if OPT_OPERANDS_UNIFICATION
02145     rb_ary_push(opts, rb_str_new2("operands unification]"));
02146 #endif
02147 #if OPT_INSTRUCTIONS_UNIFICATION
02148     rb_ary_push(opts, rb_str_new2("instructions unification"));
02149 #endif
02150 #if OPT_INLINE_METHOD_CACHE
02151     rb_ary_push(opts, rb_str_new2("inline method cache"));
02152 #endif
02153 #if OPT_BLOCKINLINING
02154     rb_ary_push(opts, rb_str_new2("block inlining"));
02155 #endif
02156 
02157     /* ::VM::InsnNameArray */
02158     rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02159 
02160     /* debug functions ::VM::SDR(), ::VM::NSDR() */
02161 #if VMDEBUG
02162     rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02163     rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02164 #else
02165     (void)sdr;
02166     (void)nsdr;
02167 #endif
02168 
02169     /* VM bootstrap: phase 2 */
02170     {
02171         rb_vm_t *vm = ruby_current_vm;
02172         rb_thread_t *th = GET_THREAD();
02173         VALUE filename = rb_str_new2("<main>");
02174         volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02175         volatile VALUE th_self;
02176         rb_iseq_t *iseq;
02177 
02178         /* create vm object */
02179         vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02180 
02181         /* create main thread */
02182         th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02183         vm->main_thread = th;
02184         vm->running_thread = th;
02185         th->vm = vm;
02186         th->top_wrapper = 0;
02187         th->top_self = rb_vm_top_self();
02188         rb_thread_set_current(th);
02189 
02190         vm->living_threads = st_init_numtable();
02191         st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02192 
02193         rb_gc_register_mark_object(iseqval);
02194         GetISeqPtr(iseqval, iseq);
02195         th->cfp->iseq = iseq;
02196         th->cfp->pc = iseq->iseq_encoded;
02197         th->cfp->self = th->top_self;
02198 
02199         /*
02200          * The Binding of the top level scope
02201          */
02202         rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02203     }
02204     vm_init_redefined_flag();
02205 }
02206 
02207 void
02208 rb_vm_set_progname(VALUE filename)
02209 {
02210     rb_thread_t *th = GET_VM()->main_thread;
02211     rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
02212     --cfp;
02213     cfp->iseq->filename = filename;
02214 }
02215 
02216 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02217 struct rb_objspace *rb_objspace_alloc(void);
02218 #endif
02219 
/* VM bootstrap: phase 1 -- allocate and wire up the rb_vm_t and the
 * main rb_thread_t before any Ruby objects exist.  Must run before
 * Init_VM (phase 2). */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        /* too early to raise: no exception machinery yet */
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);  /* vm is presumably initialized by vm_init2() below -- TODO confirm */

    /* make GET_THREAD() usable before the thread is fully initialized */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02245 
02246 /* top self */
02247 
02248 static VALUE
02249 main_to_s(VALUE obj)
02250 {
02251     return rb_str_new2("main");
02252 }
02253 
02254 VALUE
02255 rb_vm_top_self(void)
02256 {
02257     return GET_VM()->top_self;
02258 }
02259 
02260 void
02261 Init_top_self(void)
02262 {
02263     rb_vm_t *vm = GET_VM();
02264 
02265     vm->top_self = rb_obj_alloc(rb_cObject);
02266     rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
02267 
02268     /* initialize mark object array */
02269     vm->mark_object_ary = rb_ary_tmp_new(1);
02270 }
02271 
02272 VALUE *
02273 ruby_vm_verbose_ptr(rb_vm_t *vm)
02274 {
02275     return &vm->verbose;
02276 }
02277 
02278 VALUE *
02279 ruby_vm_debug_ptr(rb_vm_t *vm)
02280 {
02281     return &vm->debug;
02282 }
02283 
02284 VALUE *
02285 rb_ruby_verbose_ptr(void)
02286 {
02287     return ruby_vm_verbose_ptr(GET_VM());
02288 }
02289 
02290 VALUE *
02291 rb_ruby_debug_ptr(void)
02292 {
02293     return ruby_vm_debug_ptr(GET_VM());
02294 }
02295