GC heap size less patch

authorNariです。

この前に話にあがった、ヒープの細分化のパッチを作りました。

詳細はこちらに書いております。
http://d.hatena.ne.jp/authorNari/20080328/1206675485

もっとベンチマーク用のコードを作って精査する必要がありますが
pre版ということで、よろしくお願いします。


id: authorNari


Index: gc.c

--- gc.c	(リビジョン 15844)
+++ gc.c	(作業コピー)
@@ -143,7 +143,6 @@
static RVALUE *freelist = 0;
static RVALUE *deferred_final_list = 0;

-#define HEAPS_INCREMENT 10
static struct heaps_slot {
void *membase;
RVALUE *slot;
@@ -153,9 +152,27 @@
static int heaps_used = 0;

#define HEAP_MIN_SLOTS 10000
-static int heap_slots = HEAP_MIN_SLOTS;
+static int add_objects = HEAP_MIN_SLOTS;

+/* tiny heap size */
+// 32KB
+#define HEAP_SIZE 0x8000
+// 128KB
+//#define HEAP_SIZE 0x20000
+// 64KB
+//#define HEAP_SIZE 0x10000
+// 16KB
+//#define HEAP_SIZE 0x4000
+// 8KB
+//#define HEAP_SIZE 0x2000
+// 4KB
+//#define HEAP_SIZE 0x1000
+// 2KB
+//#define HEAP_SIZE 0x800
+
+#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE))
#define FREE_MIN 4096
+static int add_inc_time = 0;

static RVALUE *himem, *lomem;

@@ -179,8 +196,6 @@
size_t rb_gc_stack_maxsize = 655300*sizeof(VALUE);
#endif

static void run_final(VALUE obj);
static int garbage_collect(void);

@@ -417,43 +432,39 @@
}
}

static void
-add_heap(void)
+allocate_heaps(void)
{

  • struct heaps_slot *p;
  • int length;
  • heaps_length += add_objects / HEAP_OBJ_LIMIT;
  • length = heaps_length*sizeof(struct heaps_slot);
  • RUBY_CRITICAL(
  •  if (heaps_used > 0) {
    
  •      p = (struct heaps_slot *)realloc(heaps, length);
    
  •      if (p) heaps = p;
    
  •  }
    
  •  else {
    
  •      p = heaps = (struct heaps_slot *)malloc(length);
    
  •  }
    
  •  );
    
  • if (p == 0) rb_memerror();
    +}

+static void
+assign_heap_slot(void)
+{
RVALUE *p, *pend, *membase;
long hi, lo, mid;

  • int objs;
  • objs = HEAP_OBJ_LIMIT;
  • RUBY_CRITICAL(p = (RVALUE*)malloc(HEAP_SIZE));
  • if (p == 0)
  • rb_memerror();
  • if (heaps_used == heaps_length) {

  • /* Realloc heaps */

  • struct heaps_slot *p;

  • int length;

  • heaps_length += HEAPS_INCREMENT;

  • length = heaps_length*sizeof(struct heaps_slot);

  • RUBY_CRITICAL(

  •  if (heaps_used > 0) {
    
  • p = (struct heaps_slot *)realloc(heaps, length);

  • if (p) heaps = p;

  •  }
    
  •  else {
    
  • p = heaps = (struct heaps_slot *)malloc(length);

  •  });
    
  • if (p == 0) rb_memerror();

  • }

  • for (;;) {

  • RUBY_CRITICAL(p = (RVALUE*)malloc(sizeof(RVALUE)*(heap_slots+1)));

  • if (p == 0) {

  •  if (heap_slots == HEAP_MIN_SLOTS) {
    
  • rb_memerror();

  •  }
    
  •  heap_slots = HEAP_MIN_SLOTS;
    
  • }

  • else {

  •  break;
    
  • }

  • }

  • lo = 0;
    hi = heaps_used;
    while (lo < hi) {
    @@ -471,21 +482,25 @@
    }

    membase = p;

  • if ((VALUE)p % sizeof(RVALUE) == 0)

  • heap_slots += 1;

  • else

  • if ((VALUE)p % sizeof(RVALUE) != 0) {
    p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p %
    sizeof(RVALUE)));
  • if ((membase + HEAP_SIZE) < (p + HEAP_SIZE)) {
  •  objs--;
    
  • }
  • }
  • if (hi < heaps_used) {
    MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used -
    hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
  • heaps[hi].limit = heap_slots;
  • pend = p + heap_slots;
  • heaps[hi].limit = objs;
  • pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;
  • heap_slots *= 1.8;

    while (p < pend) {
    p->as.free.flags = 0;
    @@ -495,15 +510,56 @@
    }
    }

+static void
+add_heap(void)
+{

  • int add, i;
  • add = add_objects / HEAP_OBJ_LIMIT;
  • add_objects *= 1.8;
  • if ((heaps_used + add) > heaps_length) {
  •  allocate_heaps();
    
  • }
  • for (i = 0; i < add; i++) {
  •  assign_heap_slot();
    
  • }
  • add_inc_time = 0;
    +}

+static void
+set_add_inc_time(void)
+{

  • add_inc_time += add_objects / HEAP_OBJ_LIMIT;
  • add_objects *= 1.8;
  • if ((heaps_used + add_inc_time) > heaps_length) {
  • allocate_heaps();
  • }
    +}

+static int
+add_heap_increment(void)
+{

  • if (add_inc_time > 0) {
  • assign_heap_slot();
  • add_inc_time–;
  • return Qtrue;
  • }
  • return Qfalse;
    +}

#define RANY(o) ((RVALUE*)(o))

static VALUE
rb_newobj_from_heap(void)
{
VALUE obj;

  • if (ruby_gc_stress || !freelist) {
  • if(!garbage_collect()) {
  •  if (!add_heap_increment() && !garbage_collect()) {
     rb_memerror();
    
    }
    }
    @@ -516,6 +572,7 @@
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
    #endif
  • return obj;
    }

@@ -1202,6 +1259,9 @@
j++;
}
}

  • if (i != j) {
  • add_objects = heaps_used * HEAP_OBJ_LIMIT;
  • }
    }

void rb_gc_abort_threads(void);
@@ -1212,15 +1272,14 @@
RVALUE *p, *pend, *final_list;
int freed = 0;
int i;

  • unsigned long live = 0;
  • unsigned long free_min = 0;
  • unsigned long live = 0, free_min = 0, do_heap_free = 0;
  • for (i = 0; i < heaps_used; i++) {
  •    free_min += heaps[i].limit;
    
  • do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
  • free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
  • if (free_min < FREE_MIN) {
  • do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
  •    free_min = FREE_MIN;
    
    }
  • free_min = free_min * 0.2;

  • if (free_min < FREE_MIN)

  •    free_min = FREE_MIN;
    

    if (source_filenames) {
    st_foreach(source_filenames, sweep_source_filename, 0);
    @@ -1263,7 +1322,7 @@
    }
    p++;
    }

  • if (n == heaps[i].limit && freed > free_min) {

  • if (n == heaps[i].limit && freed > do_heap_free) {
    RVALUE *pp;

    heaps[i].limit = 0;
    

@@ -1282,7 +1341,8 @@
}
malloc_increase = 0;
if (freed < free_min) {

  • add_heap();
  •  set_add_inc_time();
    
  • add_heap_increment();
    }
    during_gc = 0;

@@ -1572,6 +1632,7 @@
}

 gc_sweep();
  • if (GC_NOTIFY) printf(“end garbage_collect()\n”);
    return Qtrue;
    }