jbarnette-johnson 1.0.0.200806240111 → 1.0.0.200807291507
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/MANIFEST +1 -0
- data/Rakefile +3 -10
- data/bin/johnson +2 -1
- data/ext/spidermonkey/context.c +3 -4
- data/ext/spidermonkey/context.h +1 -1
- data/ext/spidermonkey/conversions.c +39 -33
- data/ext/spidermonkey/debugger.c +5 -5
- data/ext/spidermonkey/immutable_node.c.erb +11 -11
- data/ext/spidermonkey/jroot.h +4 -4
- data/ext/spidermonkey/js_land_proxy.c +9 -8
- data/ext/spidermonkey/ruby_land_proxy.c +5 -4
- data/ext/spidermonkey/runtime.c +1 -1
- data/johnson.gemspec +36 -0
- data/lib/hoe.rb +0 -7
- data/lib/johnson/cli/options.rb +10 -4
- data/lib/johnson/spidermonkey/runtime.rb +2 -2
- data/lib/johnson/version.rb +4 -2
- data/lib/johnson.rb +1 -0
- data/test/johnson/runtime_test.rb +11 -0
- data/test/johnson/spidermonkey/ruby_land_proxy_test.rb +6 -0
- data/vendor/spidermonkey/.cvsignore +9 -0
- data/vendor/spidermonkey/Makefile.in +462 -0
- data/vendor/spidermonkey/Makefile.ref +364 -0
- data/vendor/spidermonkey/README.html +820 -0
- data/vendor/spidermonkey/SpiderMonkey.rsp +12 -0
- data/vendor/spidermonkey/Y.js +19 -0
- data/vendor/spidermonkey/build.mk +43 -0
- data/vendor/spidermonkey/config/AIX4.1.mk +65 -0
- data/vendor/spidermonkey/config/AIX4.2.mk +64 -0
- data/vendor/spidermonkey/config/AIX4.3.mk +65 -0
- data/vendor/spidermonkey/config/Darwin.mk +83 -0
- data/vendor/spidermonkey/config/Darwin1.3.mk +81 -0
- data/vendor/spidermonkey/config/Darwin1.4.mk +41 -0
- data/vendor/spidermonkey/config/Darwin5.2.mk +81 -0
- data/vendor/spidermonkey/config/Darwin5.3.mk +81 -0
- data/vendor/spidermonkey/config/HP-UXB.10.10.mk +77 -0
- data/vendor/spidermonkey/config/HP-UXB.10.20.mk +77 -0
- data/vendor/spidermonkey/config/HP-UXB.11.00.mk +80 -0
- data/vendor/spidermonkey/config/IRIX.mk +87 -0
- data/vendor/spidermonkey/config/IRIX5.3.mk +44 -0
- data/vendor/spidermonkey/config/IRIX6.1.mk +44 -0
- data/vendor/spidermonkey/config/IRIX6.2.mk +44 -0
- data/vendor/spidermonkey/config/IRIX6.3.mk +44 -0
- data/vendor/spidermonkey/config/IRIX6.5.mk +44 -0
- data/vendor/spidermonkey/config/Linux_All.mk +103 -0
- data/vendor/spidermonkey/config/Mac_OS10.0.mk +82 -0
- data/vendor/spidermonkey/config/OSF1V4.0.mk +72 -0
- data/vendor/spidermonkey/config/OSF1V5.0.mk +69 -0
- data/vendor/spidermonkey/config/SunOS4.1.4.mk +101 -0
- data/vendor/spidermonkey/config/SunOS5.10.mk +50 -0
- data/vendor/spidermonkey/config/SunOS5.3.mk +91 -0
- data/vendor/spidermonkey/config/SunOS5.4.mk +92 -0
- data/vendor/spidermonkey/config/SunOS5.5.1.mk +44 -0
- data/vendor/spidermonkey/config/SunOS5.5.mk +87 -0
- data/vendor/spidermonkey/config/SunOS5.6.mk +89 -0
- data/vendor/spidermonkey/config/SunOS5.7.mk +44 -0
- data/vendor/spidermonkey/config/SunOS5.8.mk +44 -0
- data/vendor/spidermonkey/config/SunOS5.9.mk +44 -0
- data/vendor/spidermonkey/config/WINNT4.0.mk +117 -0
- data/vendor/spidermonkey/config/WINNT5.0.mk +117 -0
- data/vendor/spidermonkey/config/WINNT5.1.mk +117 -0
- data/vendor/spidermonkey/config/WINNT5.2.mk +117 -0
- data/vendor/spidermonkey/config/WINNT6.0.mk +117 -0
- data/vendor/spidermonkey/config/dgux.mk +64 -0
- data/vendor/spidermonkey/config.mk +192 -0
- data/vendor/spidermonkey/editline/Makefile.ref +144 -0
- data/vendor/spidermonkey/editline/README +83 -0
- data/vendor/spidermonkey/editline/editline.3 +175 -0
- data/vendor/spidermonkey/editline/editline.c +1369 -0
- data/vendor/spidermonkey/editline/editline.h +135 -0
- data/vendor/spidermonkey/editline/sysunix.c +182 -0
- data/vendor/spidermonkey/editline/unix.h +82 -0
- data/vendor/spidermonkey/fdlibm/.cvsignore +7 -0
- data/vendor/spidermonkey/fdlibm/Makefile.in +127 -0
- data/vendor/spidermonkey/fdlibm/Makefile.ref +192 -0
- data/vendor/spidermonkey/fdlibm/e_acos.c +147 -0
- data/vendor/spidermonkey/fdlibm/e_acosh.c +105 -0
- data/vendor/spidermonkey/fdlibm/e_asin.c +156 -0
- data/vendor/spidermonkey/fdlibm/e_atan2.c +165 -0
- data/vendor/spidermonkey/fdlibm/e_atanh.c +110 -0
- data/vendor/spidermonkey/fdlibm/e_cosh.c +133 -0
- data/vendor/spidermonkey/fdlibm/e_exp.c +202 -0
- data/vendor/spidermonkey/fdlibm/e_fmod.c +184 -0
- data/vendor/spidermonkey/fdlibm/e_gamma.c +71 -0
- data/vendor/spidermonkey/fdlibm/e_gamma_r.c +70 -0
- data/vendor/spidermonkey/fdlibm/e_hypot.c +173 -0
- data/vendor/spidermonkey/fdlibm/e_j0.c +524 -0
- data/vendor/spidermonkey/fdlibm/e_j1.c +523 -0
- data/vendor/spidermonkey/fdlibm/e_jn.c +315 -0
- data/vendor/spidermonkey/fdlibm/e_lgamma.c +71 -0
- data/vendor/spidermonkey/fdlibm/e_lgamma_r.c +347 -0
- data/vendor/spidermonkey/fdlibm/e_log.c +184 -0
- data/vendor/spidermonkey/fdlibm/e_log10.c +134 -0
- data/vendor/spidermonkey/fdlibm/e_pow.c +386 -0
- data/vendor/spidermonkey/fdlibm/e_rem_pio2.c +222 -0
- data/vendor/spidermonkey/fdlibm/e_remainder.c +120 -0
- data/vendor/spidermonkey/fdlibm/e_scalb.c +89 -0
- data/vendor/spidermonkey/fdlibm/e_sinh.c +122 -0
- data/vendor/spidermonkey/fdlibm/e_sqrt.c +497 -0
- data/vendor/spidermonkey/fdlibm/fdlibm.h +273 -0
- data/vendor/spidermonkey/fdlibm/fdlibm.mak +1453 -0
- data/vendor/spidermonkey/fdlibm/fdlibm.mdp +0 -0
- data/vendor/spidermonkey/fdlibm/k_cos.c +135 -0
- data/vendor/spidermonkey/fdlibm/k_rem_pio2.c +354 -0
- data/vendor/spidermonkey/fdlibm/k_sin.c +114 -0
- data/vendor/spidermonkey/fdlibm/k_standard.c +785 -0
- data/vendor/spidermonkey/fdlibm/k_tan.c +170 -0
- data/vendor/spidermonkey/fdlibm/s_asinh.c +101 -0
- data/vendor/spidermonkey/fdlibm/s_atan.c +175 -0
- data/vendor/spidermonkey/fdlibm/s_cbrt.c +133 -0
- data/vendor/spidermonkey/fdlibm/s_ceil.c +120 -0
- data/vendor/spidermonkey/fdlibm/s_copysign.c +72 -0
- data/vendor/spidermonkey/fdlibm/s_cos.c +118 -0
- data/vendor/spidermonkey/fdlibm/s_erf.c +356 -0
- data/vendor/spidermonkey/fdlibm/s_expm1.c +267 -0
- data/vendor/spidermonkey/fdlibm/s_fabs.c +70 -0
- data/vendor/spidermonkey/fdlibm/s_finite.c +71 -0
- data/vendor/spidermonkey/fdlibm/s_floor.c +121 -0
- data/vendor/spidermonkey/fdlibm/s_frexp.c +99 -0
- data/vendor/spidermonkey/fdlibm/s_ilogb.c +85 -0
- data/vendor/spidermonkey/fdlibm/s_isnan.c +74 -0
- data/vendor/spidermonkey/fdlibm/s_ldexp.c +66 -0
- data/vendor/spidermonkey/fdlibm/s_lib_version.c +73 -0
- data/vendor/spidermonkey/fdlibm/s_log1p.c +211 -0
- data/vendor/spidermonkey/fdlibm/s_logb.c +79 -0
- data/vendor/spidermonkey/fdlibm/s_matherr.c +64 -0
- data/vendor/spidermonkey/fdlibm/s_modf.c +132 -0
- data/vendor/spidermonkey/fdlibm/s_nextafter.c +124 -0
- data/vendor/spidermonkey/fdlibm/s_rint.c +131 -0
- data/vendor/spidermonkey/fdlibm/s_scalbn.c +107 -0
- data/vendor/spidermonkey/fdlibm/s_signgam.c +40 -0
- data/vendor/spidermonkey/fdlibm/s_significand.c +68 -0
- data/vendor/spidermonkey/fdlibm/s_sin.c +118 -0
- data/vendor/spidermonkey/fdlibm/s_tan.c +112 -0
- data/vendor/spidermonkey/fdlibm/s_tanh.c +122 -0
- data/vendor/spidermonkey/fdlibm/w_acos.c +78 -0
- data/vendor/spidermonkey/fdlibm/w_acosh.c +78 -0
- data/vendor/spidermonkey/fdlibm/w_asin.c +80 -0
- data/vendor/spidermonkey/fdlibm/w_atan2.c +79 -0
- data/vendor/spidermonkey/fdlibm/w_atanh.c +81 -0
- data/vendor/spidermonkey/fdlibm/w_cosh.c +77 -0
- data/vendor/spidermonkey/fdlibm/w_exp.c +88 -0
- data/vendor/spidermonkey/fdlibm/w_fmod.c +78 -0
- data/vendor/spidermonkey/fdlibm/w_gamma.c +85 -0
- data/vendor/spidermonkey/fdlibm/w_gamma_r.c +81 -0
- data/vendor/spidermonkey/fdlibm/w_hypot.c +78 -0
- data/vendor/spidermonkey/fdlibm/w_j0.c +105 -0
- data/vendor/spidermonkey/fdlibm/w_j1.c +106 -0
- data/vendor/spidermonkey/fdlibm/w_jn.c +128 -0
- data/vendor/spidermonkey/fdlibm/w_lgamma.c +85 -0
- data/vendor/spidermonkey/fdlibm/w_lgamma_r.c +81 -0
- data/vendor/spidermonkey/fdlibm/w_log.c +78 -0
- data/vendor/spidermonkey/fdlibm/w_log10.c +81 -0
- data/vendor/spidermonkey/fdlibm/w_pow.c +99 -0
- data/vendor/spidermonkey/fdlibm/w_remainder.c +77 -0
- data/vendor/spidermonkey/fdlibm/w_scalb.c +95 -0
- data/vendor/spidermonkey/fdlibm/w_sinh.c +77 -0
- data/vendor/spidermonkey/fdlibm/w_sqrt.c +77 -0
- data/vendor/spidermonkey/javascript-trace.d +73 -0
- data/vendor/spidermonkey/js.c +3951 -0
- data/vendor/spidermonkey/js.mak +4438 -0
- data/vendor/spidermonkey/js.mdp +0 -0
- data/vendor/spidermonkey/js.msg +307 -0
- data/vendor/spidermonkey/js.pkg +2 -0
- data/vendor/spidermonkey/js3240.rc +79 -0
- data/vendor/spidermonkey/jsOS240.def +654 -0
- data/vendor/spidermonkey/jsapi.c +5836 -0
- data/vendor/spidermonkey/jsapi.h +2624 -0
- data/vendor/spidermonkey/jsarena.c +450 -0
- data/vendor/spidermonkey/jsarena.h +318 -0
- data/vendor/spidermonkey/jsarray.c +2988 -0
- data/vendor/spidermonkey/jsarray.h +124 -0
- data/vendor/spidermonkey/jsatom.c +1045 -0
- data/vendor/spidermonkey/jsatom.h +442 -0
- data/vendor/spidermonkey/jsbit.h +253 -0
- data/vendor/spidermonkey/jsbool.c +176 -0
- data/vendor/spidermonkey/jsbool.h +73 -0
- data/vendor/spidermonkey/jsclist.h +139 -0
- data/vendor/spidermonkey/jscntxt.c +1348 -0
- data/vendor/spidermonkey/jscntxt.h +1120 -0
- data/vendor/spidermonkey/jscompat.h +57 -0
- data/vendor/spidermonkey/jsconfig.h +248 -0
- data/vendor/spidermonkey/jsconfig.mk +181 -0
- data/vendor/spidermonkey/jscpucfg.c +383 -0
- data/vendor/spidermonkey/jscpucfg.h +212 -0
- data/vendor/spidermonkey/jsdate.c +2398 -0
- data/vendor/spidermonkey/jsdate.h +124 -0
- data/vendor/spidermonkey/jsdbgapi.c +1799 -0
- data/vendor/spidermonkey/jsdbgapi.h +464 -0
- data/vendor/spidermonkey/jsdhash.c +868 -0
- data/vendor/spidermonkey/jsdhash.h +592 -0
- data/vendor/spidermonkey/jsdtoa.c +3167 -0
- data/vendor/spidermonkey/jsdtoa.h +130 -0
- data/vendor/spidermonkey/jsdtracef.c +317 -0
- data/vendor/spidermonkey/jsdtracef.h +77 -0
- data/vendor/spidermonkey/jsemit.c +6909 -0
- data/vendor/spidermonkey/jsemit.h +741 -0
- data/vendor/spidermonkey/jsexn.c +1371 -0
- data/vendor/spidermonkey/jsexn.h +96 -0
- data/vendor/spidermonkey/jsfile.c +2736 -0
- data/vendor/spidermonkey/jsfile.h +56 -0
- data/vendor/spidermonkey/jsfile.msg +90 -0
- data/vendor/spidermonkey/jsfun.c +2634 -0
- data/vendor/spidermonkey/jsfun.h +254 -0
- data/vendor/spidermonkey/jsgc.c +3554 -0
- data/vendor/spidermonkey/jsgc.h +403 -0
- data/vendor/spidermonkey/jshash.c +476 -0
- data/vendor/spidermonkey/jshash.h +151 -0
- data/vendor/spidermonkey/jsify.pl +485 -0
- data/vendor/spidermonkey/jsinterp.c +6981 -0
- data/vendor/spidermonkey/jsinterp.h +521 -0
- data/vendor/spidermonkey/jsinvoke.c +43 -0
- data/vendor/spidermonkey/jsiter.c +1067 -0
- data/vendor/spidermonkey/jsiter.h +122 -0
- data/vendor/spidermonkey/jskeyword.tbl +124 -0
- data/vendor/spidermonkey/jskwgen.c +460 -0
- data/vendor/spidermonkey/jslibmath.h +266 -0
- data/vendor/spidermonkey/jslock.c +1309 -0
- data/vendor/spidermonkey/jslock.h +313 -0
- data/vendor/spidermonkey/jslocko.asm +60 -0
- data/vendor/spidermonkey/jslog2.c +94 -0
- data/vendor/spidermonkey/jslong.c +264 -0
- data/vendor/spidermonkey/jslong.h +412 -0
- data/vendor/spidermonkey/jsmath.c +568 -0
- data/vendor/spidermonkey/jsmath.h +57 -0
- data/vendor/spidermonkey/jsnum.c +1228 -0
- data/vendor/spidermonkey/jsnum.h +283 -0
- data/vendor/spidermonkey/jsobj.c +5266 -0
- data/vendor/spidermonkey/jsobj.h +709 -0
- data/vendor/spidermonkey/jsopcode.c +5245 -0
- data/vendor/spidermonkey/jsopcode.h +394 -0
- data/vendor/spidermonkey/jsopcode.tbl +523 -0
- data/vendor/spidermonkey/jsotypes.h +202 -0
- data/vendor/spidermonkey/jsparse.c +6680 -0
- data/vendor/spidermonkey/jsparse.h +511 -0
- data/vendor/spidermonkey/jsprf.c +1262 -0
- data/vendor/spidermonkey/jsprf.h +150 -0
- data/vendor/spidermonkey/jsproto.tbl +128 -0
- data/vendor/spidermonkey/jsprvtd.h +267 -0
- data/vendor/spidermonkey/jspubtd.h +744 -0
- data/vendor/spidermonkey/jsregexp.c +4352 -0
- data/vendor/spidermonkey/jsregexp.h +183 -0
- data/vendor/spidermonkey/jsreops.tbl +145 -0
- data/vendor/spidermonkey/jsscan.c +2003 -0
- data/vendor/spidermonkey/jsscan.h +387 -0
- data/vendor/spidermonkey/jsscope.c +1948 -0
- data/vendor/spidermonkey/jsscope.h +418 -0
- data/vendor/spidermonkey/jsscript.c +1832 -0
- data/vendor/spidermonkey/jsscript.h +287 -0
- data/vendor/spidermonkey/jsshell.msg +50 -0
- data/vendor/spidermonkey/jsstddef.h +83 -0
- data/vendor/spidermonkey/jsstr.c +5004 -0
- data/vendor/spidermonkey/jsstr.h +641 -0
- data/vendor/spidermonkey/jstypes.h +475 -0
- data/vendor/spidermonkey/jsutil.c +345 -0
- data/vendor/spidermonkey/jsutil.h +157 -0
- data/vendor/spidermonkey/jsxdrapi.c +800 -0
- data/vendor/spidermonkey/jsxdrapi.h +218 -0
- data/vendor/spidermonkey/jsxml.c +8471 -0
- data/vendor/spidermonkey/jsxml.h +349 -0
- data/vendor/spidermonkey/lock_SunOS.s +119 -0
- data/vendor/spidermonkey/perfect.js +39 -0
- data/vendor/spidermonkey/plify_jsdhash.sed +36 -0
- data/vendor/spidermonkey/prmjtime.c +846 -0
- data/vendor/spidermonkey/prmjtime.h +103 -0
- data/vendor/spidermonkey/resource.h +15 -0
- data/vendor/spidermonkey/rules.mk +197 -0
- data/vendor/spidermonkey/win32.order +384 -0
- metadata +4 -3
data/vendor/spidermonkey/jsfun.h
@@ -0,0 +1,254 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsfun_h___
+#define jsfun_h___
+/*
+ * JS function definitions.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsobj.h"
+
+JS_BEGIN_EXTERN_C
+
+typedef struct JSLocalNameMap JSLocalNameMap;
+
+/*
+ * Depending on the number of arguments and variables in the function their
+ * names and attributes are stored either as a single atom or as an array of
+ * tagged atoms (when there are few locals) or as a hash-based map (when there
+ * are many locals). In the first 2 cases the lowest bit of the atom is used
+ * as a tag to distinguish const from var. See jsfun.c for details.
+ */
+typedef union JSLocalNames {
+    jsuword         taggedAtom;
+    jsuword         *array;
+    JSLocalNameMap  *map;
+} JSLocalNames;
+
+struct JSFunction {
+    JSObject        object;       /* GC'ed object header */
+    uint16          nargs;        /* maximum number of specified arguments,
+                                     reflected as f.length/f.arity */
+    uint16          flags;        /* bound method and other flags, see jsapi.h */
+    union {
+        struct {
+            uint16      extra;    /* number of arg slots for local GC roots */
+            uint16      minargs;  /* minimum number of specified arguments, used
+                                     only when calling fast native */
+            JSNative    native;   /* native method pointer or null */
+            JSClass     *clasp;   /* if non-null, constructor for this class */
+        } n;
+        struct {
+            uint16      nvars;    /* number of local variables */
+            uint16      spare;    /* reserved for future use */
+            JSScript    *script;  /* interpreted bytecode descriptor or null */
+            JSLocalNames names;   /* argument and variable names */
+        } i;
+    } u;
+    JSAtom          *atom;        /* name for diagnostics and decompiling */
+};
+
+#define JSFUN_EXPR_CLOSURE  0x4000  /* expression closure: function(x)x*x */
+#define JSFUN_INTERPRETED   0x8000  /* use u.i if set, u.n if unset */
+
+#define JSFUN_SCRIPT_OR_FAST_NATIVE (JSFUN_INTERPRETED | JSFUN_FAST_NATIVE)
+
+#define FUN_OBJECT(fun)      (&(fun)->object)
+#define FUN_INTERPRETED(fun) ((fun)->flags & JSFUN_INTERPRETED)
+#define FUN_SLOW_NATIVE(fun) (!((fun)->flags & JSFUN_SCRIPT_OR_FAST_NATIVE))
+#define FUN_SCRIPT(fun)      (FUN_INTERPRETED(fun) ? (fun)->u.i.script : NULL)
+#define FUN_NATIVE(fun)      (FUN_SLOW_NATIVE(fun) ? (fun)->u.n.native : NULL)
+#define FUN_FAST_NATIVE(fun) (((fun)->flags & JSFUN_FAST_NATIVE) \
+                              ? (JSFastNative) (fun)->u.n.native \
+                              : NULL)
+#define FUN_MINARGS(fun)     (((fun)->flags & JSFUN_FAST_NATIVE) \
+                              ? (fun)->u.n.minargs \
+                              : (fun)->nargs)
+
+extern JSClass js_ArgumentsClass;
+extern JS_FRIEND_DATA(JSClass) js_CallClass;
+
+/* JS_FRIEND_DATA so that VALUE_IS_FUNCTION is callable from the shell. */
+extern JS_FRIEND_DATA(JSClass) js_FunctionClass;
+
+#define HAS_FUNCTION_CLASS(obj) (STOBJ_GET_CLASS(obj) == &js_FunctionClass)
+
+/*
+ * NB: jsapi.h and jsobj.h must be included before any call to this macro.
+ */
+#define VALUE_IS_FUNCTION(cx, v) \
+    (!JSVAL_IS_PRIMITIVE(v) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
+
+/*
+ * Macro to access the private slot of the function object after the slot is
+ * initialized.
+ */
+#define GET_FUNCTION_PRIVATE(cx, funobj) \
+    (JS_ASSERT(HAS_FUNCTION_CLASS(funobj)), \
+     (JSFunction *) OBJ_GET_PRIVATE(cx, funobj))
+
+extern JSObject *
+js_InitFunctionClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitArgumentsClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitCallClass(JSContext *cx, JSObject *obj);
+
+extern JSFunction *
+js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
+               uintN flags, JSObject *parent, JSAtom *atom);
+
+extern void
+js_TraceFunction(JSTracer *trc, JSFunction *fun);
+
+extern void
+js_FinalizeFunction(JSContext *cx, JSFunction *fun);
+
+extern JSObject *
+js_CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent);
+
+extern JSBool
+js_LinkFunctionObject(JSContext *cx, JSFunction *fun, JSObject *object);
+
+extern JSFunction *
+js_DefineFunction(JSContext *cx, JSObject *obj, JSAtom *atom, JSNative native,
+                  uintN nargs, uintN flags);
+
+/*
+ * Flags for js_ValueToFunction and js_ReportIsNotFunction. We depend on the
+ * fact that JSINVOKE_CONSTRUCT (aka JSFRAME_CONSTRUCTING) is 1, and test that
+ * with #if/#error in jsfun.c.
+ */
+#define JSV2F_CONSTRUCT    JSINVOKE_CONSTRUCT
+#define JSV2F_ITERATOR     JSINVOKE_ITERATOR
+#define JSV2F_SEARCH_STACK 0x10000
+
+extern JSFunction *
+js_ValueToFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToFunctionObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToCallableObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern void
+js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_GetCallObject(JSContext *cx, JSStackFrame *fp, JSObject *parent);
+
+extern JS_FRIEND_API(JSBool)
+js_PutCallObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_GetCallVar(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_GetArgsValue(JSContext *cx, JSStackFrame *fp, jsval *vp);
+
+extern JSBool
+js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp);
+
+extern JSObject *
+js_GetArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JS_FRIEND_API(JSBool)
+js_PutArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_XDRFunction(JSXDRState *xdr, JSObject **objp);
+
+typedef enum JSLocalKind {
+    JSLOCAL_NONE,
+    JSLOCAL_ARG,
+    JSLOCAL_VAR,
+    JSLOCAL_CONST
+} JSLocalKind;
+
+#define JS_GET_LOCAL_NAME_COUNT(fun) ((fun)->nargs + (fun)->u.i.nvars)
+
+extern JSBool
+js_AddLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, JSLocalKind kind);
+
+/*
+ * Look up an argument or variable name returning its kind when found or
+ * JSLOCAL_NONE when no such name exists. When indexp is not null and the name
+ * exists, *indexp will receive the index of the corresponding argument or
+ * variable.
+ */
+extern JSLocalKind
+js_LookupLocal(JSContext *cx, JSFunction *fun, JSAtom *atom, uintN *indexp);
+
+/*
+ * Functions to work with local names as an array of words.
+ *
+ * js_GetLocalNameArray returns the array or null when it cannot be allocated
+ * The function must be called only when JS_GET_LOCAL_NAME_COUNT(fun) is not
+ * zero. The function use the supplied pool to allocate the array.
+ *
+ * The elements of the array with index below fun->nargs correspond to the
+ * names of function arguments and of function variables otherwise. Use
+ * JS_LOCAL_NAME_TO_ATOM to convert array's element into an atom. It can be
+ * null when the element is an argument corresponding to a destructuring
+ * pattern. For a variable use JS_LOCAL_NAME_IS_CONST to check if it
+ * corresponds to the const declaration.
+ */
+extern jsuword *
+js_GetLocalNameArray(JSContext *cx, JSFunction *fun, struct JSArenaPool *pool);
+
+#define JS_LOCAL_NAME_TO_ATOM(nameWord) \
+    ((JSAtom *) ((nameWord) & ~(jsuword) 1))
+
+#define JS_LOCAL_NAME_IS_CONST(nameWord) \
+    ((((nameWord) & (jsuword) 1)) != 0)
+
+extern void
+js_FreezeLocalNames(JSContext *cx, JSFunction *fun);
+
+JS_END_EXTERN_C
+
+#endif /* jsfun_h___ */
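Illustrative aside on the hunk above: the JSLocalNames comment relies on atoms being word-aligned, so the lowest pointer bit is free to carry the const/var tag that JS_LOCAL_NAME_TO_ATOM and JS_LOCAL_NAME_IS_CONST recover. A minimal standalone C sketch of that tagging trick; jsuword is approximated here by uintptr_t and every name is hypothetical, not part of the package:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t jsuword;     /* stand-in for SpiderMonkey's jsuword */

    /* Same bit trick as the macros above: mask the tag bit off to get the
       pointer, test it to learn whether the local was declared const. */
    #define LOCAL_NAME_TO_ATOM(w)  ((void *) ((w) & ~(jsuword) 1))
    #define LOCAL_NAME_IS_CONST(w) (((w) & (jsuword) 1) != 0)

    int main(void)
    {
        static int atomStorage;            /* aligned, so its low bit is 0 */
        jsuword atom = (jsuword) &atomStorage;
        jsuword asVar = atom;              /* var: tag bit clear */
        jsuword asConst = atom | 1;        /* const: tag bit set */

        assert(LOCAL_NAME_TO_ATOM(asVar) == (void *) &atomStorage);
        assert(LOCAL_NAME_TO_ATOM(asConst) == (void *) &atomStorage);
        assert(!LOCAL_NAME_IS_CONST(asVar));
        assert(LOCAL_NAME_IS_CONST(asConst));
        printf("tagged-atom round trip ok\n");
        return 0;
    }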
@@ -0,0 +1,3554 @@
|
|
1
|
+
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
2
|
+
* vim: set ts=8 sw=4 et tw=78:
|
3
|
+
*
|
4
|
+
* ***** BEGIN LICENSE BLOCK *****
|
5
|
+
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
6
|
+
*
|
7
|
+
* The contents of this file are subject to the Mozilla Public License Version
|
8
|
+
* 1.1 (the "License"); you may not use this file except in compliance with
|
9
|
+
* the License. You may obtain a copy of the License at
|
10
|
+
* http://www.mozilla.org/MPL/
|
11
|
+
*
|
12
|
+
* Software distributed under the License is distributed on an "AS IS" basis,
|
13
|
+
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
14
|
+
* for the specific language governing rights and limitations under the
|
15
|
+
* License.
|
16
|
+
*
|
17
|
+
* The Original Code is Mozilla Communicator client code, released
|
18
|
+
* March 31, 1998.
|
19
|
+
*
|
20
|
+
* The Initial Developer of the Original Code is
|
21
|
+
* Netscape Communications Corporation.
|
22
|
+
* Portions created by the Initial Developer are Copyright (C) 1998
|
23
|
+
* the Initial Developer. All Rights Reserved.
|
24
|
+
*
|
25
|
+
* Contributor(s):
|
26
|
+
*
|
27
|
+
* Alternatively, the contents of this file may be used under the terms of
|
28
|
+
* either of the GNU General Public License Version 2 or later (the "GPL"),
|
29
|
+
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
30
|
+
* in which case the provisions of the GPL or the LGPL are applicable instead
|
31
|
+
* of those above. If you wish to allow use of your version of this file only
|
32
|
+
* under the terms of either the GPL or the LGPL, and not to allow others to
|
33
|
+
* use your version of this file under the terms of the MPL, indicate your
|
34
|
+
* decision by deleting the provisions above and replace them with the notice
|
35
|
+
* and other provisions required by the GPL or the LGPL. If you do not delete
|
36
|
+
* the provisions above, a recipient may use your version of this file under
|
37
|
+
* the terms of any one of the MPL, the GPL or the LGPL.
|
38
|
+
*
|
39
|
+
* ***** END LICENSE BLOCK ***** */
|
40
|
+
|
41
|
+
/*
|
42
|
+
* JS Mark-and-Sweep Garbage Collector.
|
43
|
+
*
|
44
|
+
* This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
|
45
|
+
* jsgc.h). It allocates from a special GC arena pool with each arena allocated
|
46
|
+
* using malloc. It uses an ideally parallel array of flag bytes to hold the
|
47
|
+
* mark bit, finalizer type index, etc.
|
48
|
+
*
|
49
|
+
* XXX swizzle page to freelist for better locality of reference
|
50
|
+
*/
|
51
|
+
#include "jsstddef.h"
|
52
|
+
#include <stdlib.h> /* for free */
|
53
|
+
#include <string.h> /* for memset used when DEBUG */
|
54
|
+
#include "jstypes.h"
|
55
|
+
#include "jsutil.h" /* Added by JSIFY */
|
56
|
+
#include "jshash.h" /* Added by JSIFY */
|
57
|
+
#include "jsapi.h"
|
58
|
+
#include "jsatom.h"
|
59
|
+
#include "jsbit.h"
|
60
|
+
#include "jsclist.h"
|
61
|
+
#include "jscntxt.h"
|
62
|
+
#include "jsconfig.h"
|
63
|
+
#include "jsdbgapi.h"
|
64
|
+
#include "jsexn.h"
|
65
|
+
#include "jsfun.h"
|
66
|
+
#include "jsgc.h"
|
67
|
+
#include "jsinterp.h"
|
68
|
+
#include "jsiter.h"
|
69
|
+
#include "jslock.h"
|
70
|
+
#include "jsnum.h"
|
71
|
+
#include "jsobj.h"
|
72
|
+
#include "jsparse.h"
|
73
|
+
#include "jsscope.h"
|
74
|
+
#include "jsscript.h"
|
75
|
+
#include "jsstr.h"
|
76
|
+
|
77
|
+
#if JS_HAS_XML_SUPPORT
|
78
|
+
#include "jsxml.h"
|
79
|
+
#endif
|
80
|
+
|
81
|
+
/*
|
82
|
+
* Check if posix_memalign is available.
|
83
|
+
*/
|
84
|
+
#if _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 || MOZ_MEMORY
|
85
|
+
# define HAS_POSIX_MEMALIGN 1
|
86
|
+
#else
|
87
|
+
# define HAS_POSIX_MEMALIGN 0
|
88
|
+
#endif
|
89
|
+
|
90
|
+
/*
|
91
|
+
* jemalloc provides posix_memalign but the function has to be explicitly
|
92
|
+
* declared on Windows.
|
93
|
+
*/
|
94
|
+
#if HAS_POSIX_MEMALIGN && MOZ_MEMORY_WINDOWS
|
95
|
+
JS_BEGIN_EXTERN_C
|
96
|
+
extern int
|
97
|
+
posix_memalign(void **memptr, size_t alignment, size_t size);
|
98
|
+
JS_END_EXTERN_C
|
99
|
+
#endif
|
100
|
+
|
101
|
+
/*
|
102
|
+
* Include the headers for mmap unless we have posix_memalign and do not
|
103
|
+
* insist on mmap.
|
104
|
+
*/
|
105
|
+
#if JS_GC_USE_MMAP || (!defined JS_GC_USE_MMAP && !HAS_POSIX_MEMALIGN)
|
106
|
+
# if defined(XP_WIN)
|
107
|
+
# ifndef JS_GC_USE_MMAP
|
108
|
+
# define JS_GC_USE_MMAP 1
|
109
|
+
# endif
|
110
|
+
# include <windows.h>
|
111
|
+
# else
|
112
|
+
# if defined(XP_UNIX) || defined(XP_BEOS)
|
113
|
+
# include <unistd.h>
|
114
|
+
# endif
|
115
|
+
# if _POSIX_MAPPED_FILES > 0
|
116
|
+
# ifndef JS_GC_USE_MMAP
|
117
|
+
# define JS_GC_USE_MMAP 1
|
118
|
+
# endif
|
119
|
+
# include <sys/mman.h>
|
120
|
+
|
121
|
+
/* On Mac OS X MAP_ANONYMOUS is not defined. */
|
122
|
+
# if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
|
123
|
+
# define MAP_ANONYMOUS MAP_ANON
|
124
|
+
# endif
|
125
|
+
# else
|
126
|
+
# if JS_GC_USE_MMAP
|
127
|
+
# error "JS_GC_USE_MMAP is set when mmap is not available"
|
128
|
+
# endif
|
129
|
+
# endif
|
130
|
+
# endif
|
131
|
+
#endif
|
132
|
+
|
133
|
+
/*
|
134
|
+
* A GC arena contains a fixed number of flag bits for each thing in its heap,
|
135
|
+
* and supports O(1) lookup of a flag given its thing's address.
|
136
|
+
*
|
137
|
+
* To implement this, we allocate things of the same size from a GC arena
|
138
|
+
* containing GC_ARENA_SIZE bytes aligned on GC_ARENA_SIZE boundary. The
|
139
|
+
* following picture shows arena's layout:
|
140
|
+
*
|
141
|
+
* +------------------------------+--------------------+---------------+
|
142
|
+
* | allocation area for GC thing | flags of GC things | JSGCArenaInfo |
|
143
|
+
* +------------------------------+--------------------+---------------+
|
144
|
+
*
|
145
|
+
* To find the flag bits for the thing we calculate the thing index counting
|
146
|
+
* from arena's start using:
|
147
|
+
*
|
148
|
+
* thingIndex = (thingAddress & GC_ARENA_MASK) / thingSize
|
149
|
+
*
|
150
|
+
* The details of flag's lookup depend on thing's kind. For all GC things
|
151
|
+
* except doubles we use one byte of flags where the 4 bits determine thing's
|
152
|
+
* type and the rest is used to implement GC marking, finalization and
|
153
|
+
* locking. We calculate the address of flag's byte using:
|
154
|
+
*
|
155
|
+
* flagByteAddress =
|
156
|
+
* (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo) - thingIndex
|
157
|
+
*
|
158
|
+
* where
|
159
|
+
*
|
160
|
+
* (thingAddress | GC_ARENA_MASK) - sizeof(JSGCArenaInfo)
|
161
|
+
*
|
162
|
+
* is the last byte of flags' area.
|
163
|
+
*
|
164
|
+
* This implies that the things are allocated from the start of their area and
|
165
|
+
* flags are allocated from the end. This arrangement avoids a relatively
|
166
|
+
* expensive calculation of the location of the boundary separating things and
|
167
|
+
* flags. The boundary's offset from the start of the arena is given by:
|
168
|
+
*
|
169
|
+
* thingsPerArena * thingSize
|
170
|
+
*
|
171
|
+
* where thingsPerArena is the number of things that the arena can hold:
|
172
|
+
*
|
173
|
+
* (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / (thingSize + 1).
|
174
|
+
*
|
175
|
+
* To allocate doubles we use a specialized arena. It can contain only numbers
|
176
|
+
* so we do not need the type bits. Moreover, since the doubles do not require
|
177
|
+
* a finalizer and very few of them are locked via js_LockGCThing API, we use
|
178
|
+
* just one bit of flags per double to denote if it was marked during the
|
179
|
+
* marking phase of the GC. The locking is implemented via a hash table. Thus
|
180
|
+
* for doubles the flag area becomes a bitmap.
|
181
|
+
*
|
182
|
+
* JS_GC_USE_MMAP macro governs the choice of the aligned arena allocator.
|
183
|
+
* When it is true, a platform-dependent function like mmap is used to get
|
184
|
+
* memory aligned on CPU page boundaries. If the macro is false or undefined,
|
185
|
+
* posix_memalign is used when available. Otherwise the code uses malloc to
|
186
|
+
* over-allocate a chunk with js_gcArenasPerChunk aligned arenas. The
|
187
|
+
* approximate space overhead of this is 1/js_gcArenasPerChunk. For details,
|
188
|
+
* see NewGCChunk/DestroyGCChunk below.
|
189
|
+
*
|
190
|
+
* The code also allocates arenas in chunks when JS_GC_USE_MMAP is 1 to
|
191
|
+
* minimize the overhead of mmap/munmap. In this case js_gcArenasPerChunk can
|
192
|
+
* not be a compile-time constant as the system page size is not known until
|
193
|
+
* runtime.
|
194
|
+
*/
|
195
|
+
#if JS_GC_USE_MMAP
|
196
|
+
static uint32 js_gcArenasPerChunk = 0;
|
197
|
+
static JSBool js_gcUseMmap = JS_FALSE;
|
198
|
+
#elif HAS_POSIX_MEMALIGN
|
199
|
+
# define js_gcArenasPerChunk 1
|
200
|
+
#else
|
201
|
+
# define js_gcArenasPerChunk 7
|
202
|
+
#endif
|
203
|
+
|
204
|
+
#if defined(js_gcArenasPerChunk) && js_gcArenasPerChunk == 1
|
205
|
+
# define CHUNKED_ARENA_ALLOCATION 0
|
206
|
+
#else
|
207
|
+
# define CHUNKED_ARENA_ALLOCATION 1
|
208
|
+
#endif
|
209
|
+
|
210
|
+
#define GC_ARENA_SHIFT 12
|
211
|
+
#define GC_ARENA_MASK ((jsuword) JS_BITMASK(GC_ARENA_SHIFT))
|
212
|
+
#define GC_ARENA_SIZE JS_BIT(GC_ARENA_SHIFT)
|
213
|
+
|
214
|
+
/*
|
215
|
+
* JS_GC_ARENA_PAD defines the number of bytes to pad JSGCArenaInfo structure.
|
216
|
+
* It is used to improve allocation efficiency when using posix_memalign. If
|
217
|
+
* malloc's implementation uses internal headers, then calling
|
218
|
+
*
|
219
|
+
* posix_memalign(&p, GC_ARENA_SIZE, GC_ARENA_SIZE * js_gcArenasPerChunk)
|
220
|
+
*
|
221
|
+
* in a sequence leaves holes between allocations of the size GC_ARENA_SIZE
|
222
|
+
* due to the need to fit headers. JS_GC_ARENA_PAD mitigates that so the code
|
223
|
+
* calls
|
224
|
+
*
|
225
|
+
* posix_memalign(&p, GC_ARENA_SIZE,
|
226
|
+
* GC_ARENA_SIZE * js_gcArenasPerChunk - JS_GC_ARENA_PAD)
|
227
|
+
*
|
228
|
+
* When JS_GC_ARENA_PAD is equal or greater than the number of words in the
|
229
|
+
* system header, the system can pack all allocations together without holes.
|
230
|
+
*
|
231
|
+
* With JS_GC_USE_MEMALIGN we want at least 2 word pad unless posix_memalign
|
232
|
+
* comes from jemalloc that does not use any headers/trailers.
|
233
|
+
*/
|
234
|
+
#ifndef JS_GC_ARENA_PAD
|
235
|
+
# if HAS_POSIX_MEMALIGN && !MOZ_MEMORY
|
236
|
+
# define JS_GC_ARENA_PAD (2 * JS_BYTES_PER_WORD)
|
237
|
+
# else
|
238
|
+
# define JS_GC_ARENA_PAD 0
|
239
|
+
# endif
|
240
|
+
#endif
|
241
|
+
|
242
|
+
struct JSGCArenaInfo {
|
243
|
+
/*
|
244
|
+
* Allocation list for the arena or NULL if the arena holds double values.
|
245
|
+
*/
|
246
|
+
JSGCArenaList *list;
|
247
|
+
|
248
|
+
/*
|
249
|
+
* Pointer to the previous arena in a linked list. The arena can either
|
250
|
+
* belong to one of JSContext.gcArenaList lists or, when it does not have
|
251
|
+
* any allocated GC things, to the list of free arenas in the chunk with
|
252
|
+
* head stored in JSGCChunkInfo.lastFreeArena.
|
253
|
+
*/
|
254
|
+
JSGCArenaInfo *prev;
|
255
|
+
|
256
|
+
#if !CHUNKED_ARENA_ALLOCATION
|
257
|
+
jsuword prevUntracedPage;
|
258
|
+
#else
|
259
|
+
/*
|
260
|
+
* A link field for the list of arenas with marked but not yet traced
|
261
|
+
* things. The field is encoded as arena's page to share the space with
|
262
|
+
* firstArena and arenaIndex fields.
|
263
|
+
*/
|
264
|
+
jsuword prevUntracedPage : JS_BITS_PER_WORD - GC_ARENA_SHIFT;
|
265
|
+
|
266
|
+
/*
|
267
|
+
* When firstArena is false, the index of arena in the chunk. When
|
268
|
+
* firstArena is true, the index of a free arena holding JSGCChunkInfo or
|
269
|
+
* NO_FREE_ARENAS if there are no free arenas in the chunk.
|
270
|
+
*
|
271
|
+
* GET_ARENA_INDEX and GET_CHUNK_INFO_INDEX are convenience macros to
|
272
|
+
* access either of indexes.
|
273
|
+
*/
|
274
|
+
jsuword arenaIndex : GC_ARENA_SHIFT - 1;
|
275
|
+
|
276
|
+
/* Flag indicating if the arena is the first in the chunk. */
|
277
|
+
jsuword firstArena : 1;
|
278
|
+
#endif
|
279
|
+
|
280
|
+
union {
|
281
|
+
jsuword untracedThings; /* bitset for fast search of marked
|
282
|
+
but not yet traced things */
|
283
|
+
JSBool hasMarkedDoubles; /* the arena has marked doubles */
|
284
|
+
} u;
|
285
|
+
|
286
|
+
#if JS_GC_ARENA_PAD != 0
|
287
|
+
uint8 pad[JS_GC_ARENA_PAD];
|
288
|
+
#endif
|
289
|
+
};
|
290
|
+
|
291
|
+
/*
|
292
|
+
* Verify that the bit fields are indeed shared and JSGCArenaInfo is as small
|
293
|
+
* as possible. The code does not rely on this check so if on a particular
|
294
|
+
* platform this does not compile, then, as a workaround, comment the assert
|
295
|
+
* out and submit a bug report.
|
296
|
+
*/
|
297
|
+
JS_STATIC_ASSERT(offsetof(JSGCArenaInfo, u) == 3 * sizeof(jsuword));
|
298
|
+
|
299
|
+
/*
|
300
|
+
* Macros to convert between JSGCArenaInfo, the start address of the arena and
|
301
|
+
* arena's page defined as (start address) >> GC_ARENA_SHIFT.
|
302
|
+
*/
|
303
|
+
#define ARENA_INFO_OFFSET (GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo))
|
304
|
+
|
305
|
+
#define IS_ARENA_INFO_ADDRESS(arena) \
|
306
|
+
(((jsuword) (arena) & GC_ARENA_MASK) == ARENA_INFO_OFFSET)
|
307
|
+
|
308
|
+
#define ARENA_START_TO_INFO(arenaStart) \
|
309
|
+
(JS_ASSERT(((arenaStart) & (jsuword) GC_ARENA_MASK) == 0), \
|
310
|
+
(JSGCArenaInfo *) ((arenaStart) + (jsuword) ARENA_INFO_OFFSET))
|
311
|
+
|
312
|
+
#define ARENA_INFO_TO_START(arena) \
|
313
|
+
(JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
|
314
|
+
(jsuword) (arena) & ~(jsuword) GC_ARENA_MASK)
|
315
|
+
|
316
|
+
#define ARENA_PAGE_TO_INFO(arenaPage) \
|
317
|
+
(JS_ASSERT(arenaPage != 0), \
|
318
|
+
JS_ASSERT(!((jsuword)(arenaPage) >> (JS_BITS_PER_WORD-GC_ARENA_SHIFT))), \
|
319
|
+
ARENA_START_TO_INFO((arenaPage) << GC_ARENA_SHIFT))
|
320
|
+
|
321
|
+
#define ARENA_INFO_TO_PAGE(arena) \
|
322
|
+
(JS_ASSERT(IS_ARENA_INFO_ADDRESS(arena)), \
|
323
|
+
((jsuword) (arena) >> GC_ARENA_SHIFT))
|
324
|
+
|
325
|
+
#define GET_ARENA_INFO(chunk, index) \
|
326
|
+
(JS_ASSERT((index) < js_gcArenasPerChunk), \
|
327
|
+
ARENA_START_TO_INFO(chunk + ((index) << GC_ARENA_SHIFT)))
|
328
|
+
|
329
|
+
#if CHUNKED_ARENA_ALLOCATION
|
330
|
+
/*
|
331
|
+
* Definitions for allocating arenas in chunks.
|
332
|
+
*
|
333
|
+
* All chunks that have at least one free arena are put on the doubly-linked
|
334
|
+
* list with the head stored in JSRuntime.gcChunkList. JSGCChunkInfo contains
|
335
|
+
* the head of the chunk's free arena list together with the link fields for
|
336
|
+
* gcChunkList.
|
337
|
+
*
|
338
|
+
* Structure stored in one of chunk's free arenas. GET_CHUNK_INFO_INDEX gives
|
339
|
+
* the index of this arena. When all arenas in the chunk are used, it is
|
340
|
+
* removed from the list and the index is set to NO_FREE_ARENAS indicating
|
341
|
+
* that the chunk is not on gcChunkList and has no JSGCChunkInfo available.
|
342
|
+
*/
|
343
|
+
|
344
|
+
struct JSGCChunkInfo {
|
345
|
+
JSGCChunkInfo **prevp;
|
346
|
+
JSGCChunkInfo *next;
|
347
|
+
JSGCArenaInfo *lastFreeArena;
|
348
|
+
uint32 numFreeArenas;
|
349
|
+
};
|
350
|
+
|
351
|
+
#define NO_FREE_ARENAS JS_BITMASK(GC_ARENA_SHIFT - 1)
|
352
|
+
|
353
|
+
#ifdef js_gcArenasPerChunk
|
354
|
+
JS_STATIC_ASSERT(1 <= js_gcArenasPerChunk &&
|
355
|
+
js_gcArenasPerChunk <= NO_FREE_ARENAS);
|
356
|
+
#endif
|
357
|
+
|
358
|
+
#define GET_ARENA_CHUNK(arena, index) \
|
359
|
+
(JS_ASSERT(GET_ARENA_INDEX(arena) == index), \
|
360
|
+
ARENA_INFO_TO_START(arena) - ((index) << GC_ARENA_SHIFT))
|
361
|
+
|
362
|
+
#define GET_ARENA_INDEX(arena) \
|
363
|
+
((arena)->firstArena ? 0 : (uint32) (arena)->arenaIndex)
|
364
|
+
|
365
|
+
#define GET_CHUNK_INFO_INDEX(chunk) \
|
366
|
+
((uint32) ARENA_START_TO_INFO(chunk)->arenaIndex)
|
367
|
+
|
368
|
+
#define SET_CHUNK_INFO_INDEX(chunk, index) \
|
369
|
+
(JS_ASSERT((index) < js_gcArenasPerChunk || (index) == NO_FREE_ARENAS), \
|
370
|
+
(void) (ARENA_START_TO_INFO(chunk)->arenaIndex = (jsuword) (index)))
|
371
|
+
|
372
|
+
#define GET_CHUNK_INFO(chunk, infoIndex) \
|
373
|
+
(JS_ASSERT(GET_CHUNK_INFO_INDEX(chunk) == (infoIndex)), \
|
374
|
+
JS_ASSERT((uint32) (infoIndex) < js_gcArenasPerChunk), \
|
375
|
+
(JSGCChunkInfo *) ((chunk) + ((infoIndex) << GC_ARENA_SHIFT)))
|
376
|
+
|
377
|
+
#define CHUNK_INFO_TO_INDEX(ci) \
|
378
|
+
GET_ARENA_INDEX(ARENA_START_TO_INFO((jsuword)ci))
|
379
|
+
|
380
|
+
#endif
|
381
|
+
|
382
|
+
/*
|
383
|
+
* Macros for GC-thing operations.
|
384
|
+
*/
|
385
|
+
#define THINGS_PER_ARENA(thingSize) \
|
386
|
+
((GC_ARENA_SIZE - (uint32) sizeof(JSGCArenaInfo)) / ((thingSize) + 1U))
|
387
|
+
|
388
|
+
#define THING_TO_ARENA(thing) \
|
389
|
+
((JSGCArenaInfo *)(((jsuword) (thing) | GC_ARENA_MASK) + \
|
390
|
+
1 - sizeof(JSGCArenaInfo)))
|
391
|
+
|
392
|
+
#define THING_TO_INDEX(thing, thingSize) \
|
393
|
+
((uint32) ((jsuword) (thing) & GC_ARENA_MASK) / (uint32) (thingSize))
|
394
|
+
|
395
|
+
#define THING_FLAGS_END(arena) ((uint8 *)(arena))
|
396
|
+
|
397
|
+
#define THING_FLAGP(arena, thingIndex) \
|
398
|
+
(JS_ASSERT((jsuword) (thingIndex) \
|
399
|
+
< (jsuword) THINGS_PER_ARENA((arena)->list->thingSize)), \
|
400
|
+
(uint8 *)(arena) - 1 - (thingIndex))
|
401
|
+
|
402
|
+
#define THING_TO_FLAGP(thing, thingSize) \
|
403
|
+
THING_FLAGP(THING_TO_ARENA(thing), THING_TO_INDEX(thing, thingSize))
|
404
|
+
|
405
|
+
#define FLAGP_TO_ARENA(flagp) THING_TO_ARENA(flagp)
|
406
|
+
|
407
|
+
#define FLAGP_TO_INDEX(flagp) \
|
408
|
+
(JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) < ARENA_INFO_OFFSET), \
|
409
|
+
(ARENA_INFO_OFFSET - 1 - (uint32) ((jsuword) (flagp) & GC_ARENA_MASK)))
|
410
|
+
|
411
|
+
#define FLAGP_TO_THING(flagp, thingSize) \
|
412
|
+
(JS_ASSERT(((jsuword) (flagp) & GC_ARENA_MASK) >= \
|
413
|
+
(ARENA_INFO_OFFSET - THINGS_PER_ARENA(thingSize))), \
|
414
|
+
(JSGCThing *)(((jsuword) (flagp) & ~GC_ARENA_MASK) + \
|
415
|
+
(thingSize) * FLAGP_TO_INDEX(flagp)))
|
416
|
+
|
417
|
+
/*
|
418
|
+
* Macros for the specialized arena for doubles.
|
419
|
+
*
|
420
|
+
* DOUBLES_PER_ARENA defines the maximum number of doubles that the arena can
|
421
|
+
* hold. We find it as the following. Let n be the number of doubles in the
|
422
|
+
* arena. Together with the bitmap of flags and JSGCArenaInfo they should fit
|
423
|
+
* the arena. Hence DOUBLES_PER_ARENA or n_max is the maximum value of n for
|
424
|
+
* which the following holds:
|
425
|
+
*
|
426
|
+
* n*s + ceil(n/B) <= M (1)
|
427
|
+
*
|
428
|
+
* where "/" denotes normal real division,
|
429
|
+
* ceil(r) gives the least integer not smaller than the number r,
|
430
|
+
* s is the number of words in jsdouble,
|
431
|
+
* B is number of bits per word or B == JS_BITS_PER_WORD
|
432
|
+
* M is the number of words in the arena before JSGCArenaInfo or
|
433
|
+
* M == (GC_ARENA_SIZE - sizeof(JSGCArenaInfo)) / sizeof(jsuword).
|
434
|
+
* M == ARENA_INFO_OFFSET / sizeof(jsuword)
|
435
|
+
*
|
436
|
+
* We rewrite the inequality as
|
437
|
+
*
|
438
|
+
* n*B*s/B + ceil(n/B) <= M,
|
439
|
+
* ceil(n*B*s/B + n/B) <= M,
|
440
|
+
* ceil(n*(B*s + 1)/B) <= M (2)
|
441
|
+
*
|
442
|
+
* We define a helper function e(n, s, B),
|
443
|
+
*
|
444
|
+
* e(n, s, B) := ceil(n*(B*s + 1)/B) - n*(B*s + 1)/B, 0 <= e(n, s, B) < 1.
|
445
|
+
*
|
446
|
+
* It gives:
|
447
|
+
*
|
448
|
+
* n*(B*s + 1)/B + e(n, s, B) <= M,
|
449
|
+
* n + e*B/(B*s + 1) <= M*B/(B*s + 1)
|
450
|
+
*
|
451
|
+
* We apply the floor function to both sides of the last equation, where
|
452
|
+
* floor(r) gives the biggest integer not greater than r. As a consequence we
|
453
|
+
* have:
|
454
|
+
*
|
455
|
+
* floor(n + e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
|
456
|
+
* n + floor(e*B/(B*s + 1)) <= floor(M*B/(B*s + 1)),
|
457
|
+
* n <= floor(M*B/(B*s + 1)), (3)
|
458
|
+
*
|
459
|
+
* where floor(e*B/(B*s + 1)) is zero as e*B/(B*s + 1) < B/(B*s + 1) < 1.
|
460
|
+
* Thus any n that satisfies the original constraint (1) or its equivalent (2),
|
461
|
+
* must also satisfy (3). That is, we got an upper estimate for the maximum
|
462
|
+
* value of n. Lets show that this upper estimate,
|
463
|
+
*
|
464
|
+
* floor(M*B/(B*s + 1)), (4)
|
465
|
+
*
|
466
|
+
* also satisfies (1) and, as such, gives the required maximum value.
|
467
|
+
* Substituting it into (2) gives:
|
468
|
+
*
|
469
|
+
* ceil(floor(M*B/(B*s + 1))*(B*s + 1)/B) == ceil(floor(M/X)*X)
|
470
|
+
*
|
471
|
+
* where X == (B*s + 1)/B > 1. But then floor(M/X)*X <= M/X*X == M and
|
472
|
+
*
|
473
|
+
* ceil(floor(M/X)*X) <= ceil(M) == M.
|
474
|
+
*
|
475
|
+
* Thus the value of (4) gives the maximum n satisfying (1).
|
476
|
+
*
|
477
|
+
* For the final result we observe that in (4)
|
478
|
+
*
|
479
|
+
* M*B == ARENA_INFO_OFFSET / sizeof(jsuword) * JS_BITS_PER_WORD
|
480
|
+
* == ARENA_INFO_OFFSET * JS_BITS_PER_BYTE
|
481
|
+
*
|
482
|
+
* and
|
483
|
+
*
|
484
|
+
* B*s == JS_BITS_PER_WORD * sizeof(jsdouble) / sizeof(jsuword)
|
485
|
+
* == JS_BITS_PER_DOUBLE.
|
486
|
+
*/
|
487
|
+
#define DOUBLES_PER_ARENA \
|
488
|
+
((ARENA_INFO_OFFSET * JS_BITS_PER_BYTE) / (JS_BITS_PER_DOUBLE + 1))
|
489
|
+
|
490
|
+
/*
|
491
|
+
* Check that ARENA_INFO_OFFSET and sizeof(jsdouble) divides sizeof(jsuword).
|
492
|
+
*/
|
493
|
+
JS_STATIC_ASSERT(ARENA_INFO_OFFSET % sizeof(jsuword) == 0);
|
494
|
+
JS_STATIC_ASSERT(sizeof(jsdouble) % sizeof(jsuword) == 0);
|
495
|
+
JS_STATIC_ASSERT(sizeof(jsbitmap) == sizeof(jsuword));
|
496
|
+
|
497
|
+
#define DOUBLES_ARENA_BITMAP_WORDS \
|
498
|
+
(JS_HOWMANY(DOUBLES_PER_ARENA, JS_BITS_PER_WORD))
|
499
|
+
|
500
|
+
/* Check that DOUBLES_PER_ARENA indeed maximises (1). */
|
501
|
+
JS_STATIC_ASSERT(DOUBLES_PER_ARENA * sizeof(jsdouble) +
|
502
|
+
DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword) <=
|
503
|
+
ARENA_INFO_OFFSET);
|
504
|
+
|
505
|
+
JS_STATIC_ASSERT((DOUBLES_PER_ARENA + 1) * sizeof(jsdouble) +
|
506
|
+
sizeof(jsuword) *
|
507
|
+
JS_HOWMANY((DOUBLES_PER_ARENA + 1), JS_BITS_PER_WORD) >
|
508
|
+
ARENA_INFO_OFFSET);
|
509
|
+
|
510
|
+
/*
|
511
|
+
* When DOUBLES_PER_ARENA % BITS_PER_DOUBLE_FLAG_UNIT != 0, some bits in the
|
512
|
+
* last byte of the occupation bitmap are unused.
|
513
|
+
*/
|
514
|
+
#define UNUSED_DOUBLE_BITMAP_BITS \
|
515
|
+
(DOUBLES_ARENA_BITMAP_WORDS * JS_BITS_PER_WORD - DOUBLES_PER_ARENA)
|
516
|
+
|
517
|
+
JS_STATIC_ASSERT(UNUSED_DOUBLE_BITMAP_BITS < JS_BITS_PER_WORD);
|
518
|
+
|
519
|
+
#define DOUBLES_ARENA_BITMAP_OFFSET \
|
520
|
+
(ARENA_INFO_OFFSET - DOUBLES_ARENA_BITMAP_WORDS * sizeof(jsuword))
|
521
|
+
|
522
|
+
#define CHECK_DOUBLE_ARENA_INFO(arenaInfo) \
|
523
|
+
(JS_ASSERT(IS_ARENA_INFO_ADDRESS(arenaInfo)), \
|
524
|
+
JS_ASSERT(!(arenaInfo)->list)) \
|
525
|
+
|
526
|
+
/*
|
527
|
+
* Get the start of the bitmap area containing double mark flags in the arena.
|
528
|
+
* To access the flag the code uses
|
529
|
+
*
|
530
|
+
* JS_TEST_BIT(bitmapStart, index)
|
531
|
+
*
|
532
|
+
* That is, compared with the case of arenas with non-double things, we count
|
533
|
+
* flags from the start of the bitmap area, not from the end.
|
534
|
+
*/
|
535
|
+
#define DOUBLE_ARENA_BITMAP(arenaInfo) \
|
536
|
+
(CHECK_DOUBLE_ARENA_INFO(arenaInfo), \
|
537
|
+
(jsbitmap *) arenaInfo - DOUBLES_ARENA_BITMAP_WORDS)
|
538
|
+
|
539
|
+
#define DOUBLE_THING_TO_INDEX(thing) \
|
540
|
+
(CHECK_DOUBLE_ARENA_INFO(THING_TO_ARENA(thing)), \
|
541
|
+
JS_ASSERT(((jsuword) (thing) & GC_ARENA_MASK) < \
|
542
|
+
DOUBLES_ARENA_BITMAP_OFFSET), \
|
543
|
+
((uint32) (((jsuword) (thing) & GC_ARENA_MASK) / sizeof(jsdouble))))
|
544
|
+
|
545
|
+
static void
|
546
|
+
ClearDoubleArenaFlags(JSGCArenaInfo *a)
|
547
|
+
{
|
548
|
+
jsbitmap *bitmap, mask;
|
549
|
+
uintN nused;
|
550
|
+
|
551
|
+
/*
|
552
|
+
* When some high bits in the last byte of the double occupation bitmap
|
553
|
+
* are unused, we must set them. Otherwise RefillDoubleFreeList will
|
554
|
+
* assume that they corresponds to some free cells and tries to allocate
|
555
|
+
* them.
|
556
|
+
*
|
557
|
+
* Note that the code works correctly with UNUSED_DOUBLE_BITMAP_BITS == 0.
|
558
|
+
*/
|
559
|
+
bitmap = DOUBLE_ARENA_BITMAP(a);
|
560
|
+
memset(bitmap, 0, (DOUBLES_ARENA_BITMAP_WORDS - 1) * sizeof *bitmap);
|
561
|
+
mask = ((jsbitmap) 1 << UNUSED_DOUBLE_BITMAP_BITS) - 1;
|
562
|
+
nused = JS_BITS_PER_WORD - UNUSED_DOUBLE_BITMAP_BITS;
|
563
|
+
bitmap[DOUBLES_ARENA_BITMAP_WORDS - 1] = mask << nused;
|
564
|
+
}
|
565
|
+
|
566
|
+
static JS_INLINE JSBool
|
567
|
+
IsMarkedDouble(JSGCArenaInfo *a, uint32 index)
|
568
|
+
{
|
569
|
+
jsbitmap *bitmap;
|
570
|
+
|
571
|
+
JS_ASSERT(a->u.hasMarkedDoubles);
|
572
|
+
bitmap = DOUBLE_ARENA_BITMAP(a);
|
573
|
+
return JS_TEST_BIT(bitmap, index);
|
574
|
+
}
|
575
|
+
|
576
|
+
/*
|
577
|
+
* JSRuntime.gcDoubleArenaList.nextDoubleFlags points either to:
|
578
|
+
*
|
579
|
+
* 1. The next byte in the bitmap area for doubles to check for unmarked
|
580
|
+
* (or free) doubles.
|
581
|
+
* 2. Or to the end of the bitmap area when all GC cells of the arena are
|
582
|
+
* allocated.
|
583
|
+
* 3. Or to a special sentinel value indicating that there are no arenas
|
584
|
+
* to check for unmarked doubles.
|
585
|
+
*
|
586
|
+
* We set the sentinel to ARENA_INFO_OFFSET so the single check
|
587
|
+
*
|
588
|
+
* ((jsuword) nextDoubleFlags & GC_ARENA_MASK) == ARENA_INFO_OFFSET
|
589
|
+
*
|
590
|
+
* will cover both the second and the third cases.
|
591
|
+
*/
|
592
|
+
#define DOUBLE_BITMAP_SENTINEL ((jsbitmap *) ARENA_INFO_OFFSET)
|
593
|
+
|
594
|
+
#ifdef JS_THREADSAFE
|
595
|
+
/*
|
596
|
+
* The maximum number of things to put on the local free list by taking
|
597
|
+
* several things from the global free list or from the tail of the last
|
598
|
+
* allocated arena to amortize the cost of rt->gcLock.
|
599
|
+
*
|
600
|
+
* We use number 8 based on benchmarks from bug 312238.
|
601
|
+
*/
|
602
|
+
#define MAX_THREAD_LOCAL_THINGS 8
|
603
|
+
|
604
|
+
#endif
|
605
|
+
|
606
|
+
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
|
607
|
+
|
608
|
+
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
|
609
|
+
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
|
610
|
+
|
611
|
+
/* We want to use all the available GC thing space for object's slots. */
|
612
|
+
JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(JSGCThing) == 0);
|
613
|
+
|
614
|
+
/*
|
615
|
+
* Ensure that JSObject is allocated from a different GC-list rather than
|
616
|
+
* jsdouble and JSString so we can easily finalize JSObject before these 2
|
617
|
+
* types of GC things. See comments in js_GC.
|
618
|
+
*/
|
619
|
+
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(JSString)) !=
|
620
|
+
GC_FREELIST_INDEX(sizeof(JSObject)));
|
621
|
+
JS_STATIC_ASSERT(GC_FREELIST_INDEX(sizeof(jsdouble)) !=
|
622
|
+
GC_FREELIST_INDEX(sizeof(JSObject)));
|
623
|
+
|
624
|
+
/*
|
625
|
+
* JSPtrTable capacity growth descriptor. The table grows by powers of two
|
626
|
+
* starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
|
627
|
+
* growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
|
628
|
+
*/
|
629
|
+
typedef struct JSPtrTableInfo {
|
630
|
+
uint16 minCapacity;
|
631
|
+
uint16 linearGrowthThreshold;
|
632
|
+
} JSPtrTableInfo;
|
633
|
+
|
634
|
+
#define GC_ITERATOR_TABLE_MIN 4
|
635
|
+
#define GC_ITERATOR_TABLE_LINEAR 1024
|
636
|
+
|
637
|
+
static const JSPtrTableInfo iteratorTableInfo = {
|
638
|
+
GC_ITERATOR_TABLE_MIN,
|
639
|
+
GC_ITERATOR_TABLE_LINEAR
|
640
|
+
};
|
641
|
+
|
642
|
+
/* Calculate table capacity based on the current value of JSPtrTable.count. */
|
643
|
+
static size_t
|
644
|
+
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
|
645
|
+
{
|
646
|
+
size_t linear, log, capacity;
|
647
|
+
|
648
|
+
linear = info->linearGrowthThreshold;
|
649
|
+
JS_ASSERT(info->minCapacity <= linear);
|
650
|
+
|
651
|
+
if (count == 0) {
|
652
|
+
capacity = 0;
|
653
|
+
} else if (count < linear) {
|
654
|
+
log = JS_CEILING_LOG2W(count);
|
655
|
+
JS_ASSERT(log != JS_BITS_PER_WORD);
|
656
|
+
capacity = (size_t)1 << log;
|
657
|
+
if (capacity < info->minCapacity)
|
658
|
+
capacity = info->minCapacity;
|
659
|
+
} else {
|
660
|
+
capacity = JS_ROUNDUP(count, linear);
|
661
|
+
}
|
662
|
+
|
663
|
+
JS_ASSERT(capacity >= count);
|
664
|
+
return capacity;
|
665
|
+
}
|
666
|
+
|
667
|
+
static void
|
668
|
+
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
|
669
|
+
{
|
670
|
+
if (table->array) {
|
671
|
+
JS_ASSERT(table->count > 0);
|
672
|
+
free(table->array);
|
673
|
+
table->array = NULL;
|
674
|
+
table->count = 0;
|
675
|
+
}
|
676
|
+
JS_ASSERT(table->count == 0);
|
677
|
+
}
|
678
|
+
|
679
|
+
static JSBool
|
680
|
+
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
|
681
|
+
void *ptr)
|
682
|
+
{
|
683
|
+
size_t count, capacity;
|
684
|
+
void **array;
|
685
|
+
|
686
|
+
count = table->count;
|
687
|
+
capacity = PtrTableCapacity(count, info);
|
688
|
+
|
689
|
+
if (count == capacity) {
|
690
|
+
if (capacity < info->minCapacity) {
|
691
|
+
JS_ASSERT(capacity == 0);
|
692
|
+
JS_ASSERT(!table->array);
|
693
|
+
capacity = info->minCapacity;
|
694
|
+
} else {
|
695
|
+
/*
|
696
|
+
* Simplify the overflow detection assuming pointer is bigger
|
697
|
+
* than byte.
|
698
|
+
*/
|
699
|
+
JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
|
700
|
+
capacity = (capacity < info->linearGrowthThreshold)
|
701
|
+
? 2 * capacity
|
702
|
+
: capacity + info->linearGrowthThreshold;
|
703
|
+
if (capacity > (size_t)-1 / sizeof table->array[0])
|
704
|
+
goto bad;
|
705
|
+
}
|
706
|
+
array = (void **) realloc(table->array,
|
707
|
+
capacity * sizeof table->array[0]);
|
708
|
+
if (!array)
|
709
|
+
goto bad;
|
710
|
+
#ifdef DEBUG
|
711
|
+
memset(array + count, JS_FREE_PATTERN,
|
712
|
+
(capacity - count) * sizeof table->array[0]);
|
713
|
+
#endif
|
714
|
+
table->array = array;
|
715
|
+
}
|
716
|
+
|
717
|
+
table->array[count] = ptr;
|
718
|
+
table->count = count + 1;
|
719
|
+
|
720
|
+
return JS_TRUE;
|
721
|
+
|
722
|
+
bad:
|
723
|
+
JS_ReportOutOfMemory(cx);
|
724
|
+
return JS_FALSE;
|
725
|
+
}
|
726
|
+
|
727
|
+
static void
|
728
|
+
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
|
729
|
+
size_t newCount)
|
730
|
+
{
|
731
|
+
size_t oldCapacity, capacity;
|
732
|
+
void **array;
|
733
|
+
|
734
|
+
JS_ASSERT(newCount <= table->count);
|
735
|
+
if (newCount == table->count)
|
736
|
+
return;
|
737
|
+
|
738
|
+
oldCapacity = PtrTableCapacity(table->count, info);
|
739
|
+
table->count = newCount;
|
740
|
+
capacity = PtrTableCapacity(newCount, info);
|
741
|
+
|
742
|
+
if (oldCapacity != capacity) {
|
743
|
+
array = table->array;
|
744
|
+
JS_ASSERT(array);
|
745
|
+
if (capacity == 0) {
|
746
|
+
free(array);
|
747
|
+
table->array = NULL;
|
748
|
+
return;
|
749
|
+
}
|
750
|
+
array = (void **) realloc(array, capacity * sizeof array[0]);
|
751
|
+
if (array)
|
752
|
+
table->array = array;
|
753
|
+
}
|
754
|
+
#ifdef DEBUG
|
755
|
+
memset(table->array + newCount, JS_FREE_PATTERN,
|
756
|
+
(capacity - newCount) * sizeof table->array[0]);
|
757
|
+
#endif
|
758
|
+
}
|
759
|
+
|
760
|
+
#ifdef JS_GCMETER
|
761
|
+
# define METER(x) ((void) (x))
|
762
|
+
# define METER_IF(condition, x) ((void) ((condition) && (x)))
|
763
|
+
#else
|
764
|
+
# define METER(x) ((void) 0)
|
765
|
+
# define METER_IF(condition, x) ((void) 0)
|
766
|
+
#endif
|
767
|
+
|
768
|
+
#define METER_UPDATE_MAX(maxLval, rval) \
|
769
|
+
METER_IF((maxLval) < (rval), (maxLval) = (rval))
|
770
|
+
|
771
|
+
#if JS_GC_USE_MMAP || !HAS_POSIX_MEMALIGN
|
772
|
+
|
773
|
+
/*
|
774
|
+
* For chunks allocated via over-sized malloc, get a pointer to store the gap
|
775
|
+
* between the malloc's result and the first arena in the chunk.
|
776
|
+
*/
|
777
|
+
static uint32 *
|
778
|
+
GetMallocedChunkGapPtr(jsuword chunk)
|
779
|
+
{
|
780
|
+
JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
|
781
|
+
|
782
|
+
/* Use the memory after the chunk, see NewGCChunk for details. */
|
783
|
+
return (uint32 *) (chunk + (js_gcArenasPerChunk << GC_ARENA_SHIFT));
|
784
|
+
}
|
785
|
+
|
786
|
+
#endif
|
787
|
+
|
788
|
+
+static jsuword
+NewGCChunk(void)
+{
+    void *p;
+
+#if JS_GC_USE_MMAP
+    if (js_gcUseMmap) {
+# if defined(XP_WIN)
+        p = VirtualAlloc(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
+                         MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+        return (jsuword) p;
+# else
+        p = mmap(NULL, js_gcArenasPerChunk << GC_ARENA_SHIFT,
+                 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        return (p == MAP_FAILED) ? 0 : (jsuword) p;
+# endif
+    }
+#endif
+
+#if HAS_POSIX_MEMALIGN
+    if (0 != posix_memalign(&p, GC_ARENA_SIZE,
+                            GC_ARENA_SIZE * js_gcArenasPerChunk -
+                            JS_GC_ARENA_PAD)) {
+        return 0;
+    }
+    return (jsuword) p;
+#else
+    /*
+     * Implement chunk allocation using oversized malloc if mmap and
+     * posix_memalign are not available.
+     *
+     * Since malloc allocates pointers aligned on the word boundary, to get
+     * js_gcArenasPerChunk aligned arenas, we need to malloc only
+     *
+     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT) - sizeof(size_t)
+     *
+     * bytes. But since we store the gap between the malloced pointer and the
+     * first arena in the chunk after the chunk, we need to ask for
+     *
+     *   ((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT)
+     *
+     * bytes to ensure that we always have room to store the gap.
+     */
+    p = malloc((js_gcArenasPerChunk + 1) << GC_ARENA_SHIFT);
+    if (!p)
+        return 0;
+
+    {
+        jsuword chunk;
+
+        chunk = ((jsuword) p + GC_ARENA_MASK) & ~GC_ARENA_MASK;
+        *GetMallocedChunkGapPtr(chunk) = (uint32) (chunk - (jsuword) p);
+        return chunk;
+    }
+#endif
+}
+
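The #else branch relies on over-allocating by one arena so that an aligned start always exists inside the malloc'd block, and on stashing the distance back to the raw pointer so the block can be freed later. A freestanding sketch of that trick, assuming a power-of-two alignment (all names here are illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    /* Allocate size bytes aligned to align (a power of two), recording the
       gap to the raw pointer just past the aligned block. */
    static void *
    aligned_alloc_with_gap(size_t size, size_t align)
    {
        unsigned char *raw = malloc(size + align + sizeof(uint32_t));
        uintptr_t aligned;

        if (!raw)
            return NULL;
        aligned = ((uintptr_t) raw + align - 1) & ~(uintptr_t)(align - 1);
        /* Store the gap after the block, as GetMallocedChunkGapPtr does. */
        *(uint32_t *) (aligned + size) = (uint32_t) (aligned - (uintptr_t) raw);
        return (void *) aligned;
    }

    static void
    aligned_free_with_gap(void *p, size_t size)
    {
        uint32_t gap = *(uint32_t *) ((uintptr_t) p + size);
        free((unsigned char *) p - gap);
    }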
+static void
+DestroyGCChunk(jsuword chunk)
+{
+    JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
+#if JS_GC_USE_MMAP
+    if (js_gcUseMmap) {
+# if defined(XP_WIN)
+        VirtualFree((void *) chunk, 0, MEM_RELEASE);
+# else
+        munmap((void *) chunk, js_gcArenasPerChunk << GC_ARENA_SHIFT);
+# endif
+        return;
+    }
+#endif
+
+#if HAS_POSIX_MEMALIGN
+    free((void *) chunk);
+#else
+    /* See comments in NewGCChunk. */
+    JS_ASSERT(*GetMallocedChunkGapPtr(chunk) < GC_ARENA_SIZE);
+    free((void *) (chunk - *GetMallocedChunkGapPtr(chunk)));
+#endif
+}
+
+#if CHUNKED_ARENA_ALLOCATION
+
+static void
+AddChunkToList(JSRuntime *rt, JSGCChunkInfo *ci)
+{
+    ci->prevp = &rt->gcChunkList;
+    ci->next = rt->gcChunkList;
+    if (rt->gcChunkList) {
+        JS_ASSERT(rt->gcChunkList->prevp == &rt->gcChunkList);
+        rt->gcChunkList->prevp = &ci->next;
+    }
+    rt->gcChunkList = ci;
+}
+
+static void
+RemoveChunkFromList(JSRuntime *rt, JSGCChunkInfo *ci)
+{
+    *ci->prevp = ci->next;
+    if (ci->next) {
+        JS_ASSERT(ci->next->prevp == &ci->next);
+        ci->next->prevp = ci->prevp;
+    }
+}
+
+#endif
+
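AddChunkToList and RemoveChunkFromList keep the chunk list doubly linked through a pointer-to-pointer prevp field, so removal never needs to special-case the list head. The same idiom in isolation (Node and the function names are illustrative):

    typedef struct Node {
        struct Node *next;
        struct Node **prevp;   /* points at whatever pointer points at us */
    } Node;

    static void
    list_add(Node **head, Node *n)
    {
        n->prevp = head;
        n->next = *head;
        if (*head)
            (*head)->prevp = &n->next;
        *head = n;
    }

    static void
    list_remove(Node *n)
    {
        *n->prevp = n->next;
        if (n->next)
            n->next->prevp = n->prevp;
    }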
+static JSGCArenaInfo *
+NewGCArena(JSRuntime *rt)
+{
+    jsuword chunk;
+    JSGCArenaInfo *a;
+
+    if (rt->gcBytes >= rt->gcMaxBytes)
+        return NULL;
+
+#if CHUNKED_ARENA_ALLOCATION
+    if (js_gcArenasPerChunk == 1) {
+#endif
+        chunk = NewGCChunk();
+        if (chunk == 0)
+            return NULL;
+        a = ARENA_START_TO_INFO(chunk);
+#if CHUNKED_ARENA_ALLOCATION
+    } else {
+        JSGCChunkInfo *ci;
+        uint32 i;
+        JSGCArenaInfo *aprev;
+
+        ci = rt->gcChunkList;
+        if (!ci) {
+            chunk = NewGCChunk();
+            if (chunk == 0)
+                return NULL;
+            JS_ASSERT((chunk & GC_ARENA_MASK) == 0);
+            a = GET_ARENA_INFO(chunk, 0);
+            a->firstArena = JS_TRUE;
+            a->arenaIndex = 0;
+            aprev = NULL;
+            i = 0;
+            do {
+                a->prev = aprev;
+                aprev = a;
+                ++i;
+                a = GET_ARENA_INFO(chunk, i);
+                a->firstArena = JS_FALSE;
+                a->arenaIndex = i;
+            } while (i != js_gcArenasPerChunk - 1);
+            ci = GET_CHUNK_INFO(chunk, 0);
+            ci->lastFreeArena = aprev;
+            ci->numFreeArenas = js_gcArenasPerChunk - 1;
+            AddChunkToList(rt, ci);
+        } else {
+            JS_ASSERT(ci->prevp == &rt->gcChunkList);
+            a = ci->lastFreeArena;
+            aprev = a->prev;
+            if (!aprev) {
+                JS_ASSERT(ci->numFreeArenas == 1);
+                JS_ASSERT(ARENA_INFO_TO_START(a) == (jsuword) ci);
+                RemoveChunkFromList(rt, ci);
+                chunk = GET_ARENA_CHUNK(a, GET_ARENA_INDEX(a));
+                SET_CHUNK_INFO_INDEX(chunk, NO_FREE_ARENAS);
+            } else {
+                JS_ASSERT(ci->numFreeArenas >= 2);
+                JS_ASSERT(ARENA_INFO_TO_START(a) != (jsuword) ci);
+                ci->lastFreeArena = aprev;
+                ci->numFreeArenas--;
+            }
+        }
+    }
+#endif
+
+    rt->gcBytes += GC_ARENA_SIZE;
+    a->prevUntracedPage = 0;
+    memset(&a->u, 0, sizeof(a->u));
+
+    return a;
+}
+
+static void
+DestroyGCArenas(JSRuntime *rt, JSGCArenaInfo *last)
+{
+    JSGCArenaInfo *a;
+
+    while (last) {
+        a = last;
+        last = last->prev;
+
+        METER(rt->gcStats.afree++);
+        JS_ASSERT(rt->gcBytes >= GC_ARENA_SIZE);
+        rt->gcBytes -= GC_ARENA_SIZE;
+
+#if CHUNKED_ARENA_ALLOCATION
+        if (js_gcArenasPerChunk == 1) {
+#endif
+            DestroyGCChunk(ARENA_INFO_TO_START(a));
+#if CHUNKED_ARENA_ALLOCATION
+        } else {
+            uint32 arenaIndex;
+            jsuword chunk;
+            uint32 chunkInfoIndex;
+            JSGCChunkInfo *ci;
+# ifdef DEBUG
+            jsuword firstArena;
+
+            firstArena = a->firstArena;
+            arenaIndex = a->arenaIndex;
+            memset((void *) ARENA_INFO_TO_START(a), JS_FREE_PATTERN,
+                   GC_ARENA_SIZE - JS_GC_ARENA_PAD);
+            a->firstArena = firstArena;
+            a->arenaIndex = arenaIndex;
+# endif
+            arenaIndex = GET_ARENA_INDEX(a);
+            chunk = GET_ARENA_CHUNK(a, arenaIndex);
+            chunkInfoIndex = GET_CHUNK_INFO_INDEX(chunk);
+            if (chunkInfoIndex == NO_FREE_ARENAS) {
+                chunkInfoIndex = arenaIndex;
+                SET_CHUNK_INFO_INDEX(chunk, arenaIndex);
+                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
+                a->prev = NULL;
+                ci->lastFreeArena = a;
+                ci->numFreeArenas = 1;
+                AddChunkToList(rt, ci);
+            } else {
+                JS_ASSERT(chunkInfoIndex != arenaIndex);
+                ci = GET_CHUNK_INFO(chunk, chunkInfoIndex);
+                JS_ASSERT(ci->numFreeArenas != 0);
+                JS_ASSERT(ci->lastFreeArena);
+                JS_ASSERT(a != ci->lastFreeArena);
+                if (ci->numFreeArenas == js_gcArenasPerChunk - 1) {
+                    RemoveChunkFromList(rt, ci);
+                    DestroyGCChunk(chunk);
+                } else {
+                    ++ci->numFreeArenas;
+                    a->prev = ci->lastFreeArena;
+                    ci->lastFreeArena = a;
+                }
+            }
+        }
+# endif
+    }
+}
+
+static void
+InitGCArenaLists(JSRuntime *rt)
+{
+    uintN i, thingSize;
+    JSGCArenaList *arenaList;
+
+    for (i = 0; i < GC_NUM_FREELISTS; i++) {
+        arenaList = &rt->gcArenaList[i];
+        thingSize = GC_FREELIST_NBYTES(i);
+        JS_ASSERT((size_t)(uint16)thingSize == thingSize);
+        arenaList->last = NULL;
+        arenaList->lastCount = (uint16) THINGS_PER_ARENA(thingSize);
+        arenaList->thingSize = (uint16) thingSize;
+        arenaList->freeList = NULL;
+    }
+    rt->gcDoubleArenaList.first = NULL;
+    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
+}
+
+static void
+FinishGCArenaLists(JSRuntime *rt)
+{
+    uintN i;
+    JSGCArenaList *arenaList;
+
+    for (i = 0; i < GC_NUM_FREELISTS; i++) {
+        arenaList = &rt->gcArenaList[i];
+        DestroyGCArenas(rt, arenaList->last);
+        arenaList->last = NULL;
+        arenaList->lastCount = THINGS_PER_ARENA(arenaList->thingSize);
+        arenaList->freeList = NULL;
+    }
+    DestroyGCArenas(rt, rt->gcDoubleArenaList.first);
+    rt->gcDoubleArenaList.first = NULL;
+    rt->gcDoubleArenaList.nextDoubleFlags = DOUBLE_BITMAP_SENTINEL;
+
+    rt->gcBytes = 0;
+    JS_ASSERT(rt->gcChunkList == 0);
+}
+
+/*
+ * This function must not be called when thing is a jsdouble.
+ */
+static uint8 *
+GetGCThingFlags(void *thing)
+{
+    JSGCArenaInfo *a;
+    uint32 index;
+
+    a = THING_TO_ARENA(thing);
+    index = THING_TO_INDEX(thing, a->list->thingSize);
+    return THING_FLAGP(a, index);
+}
+
+/*
+ * This function returns null when thing is a jsdouble.
+ */
+static uint8 *
+GetGCThingFlagsOrNull(void *thing)
+{
+    JSGCArenaInfo *a;
+    uint32 index;
+
+    a = THING_TO_ARENA(thing);
+    if (!a->list)
+        return NULL;
+    index = THING_TO_INDEX(thing, a->list->thingSize);
+    return THING_FLAGP(a, index);
+}
+
+intN
+js_GetExternalStringGCType(JSString *str)
+{
+    uintN type;
+
+    type = (uintN) *GetGCThingFlags(str) & GCF_TYPEMASK;
+    JS_ASSERT(type == GCX_STRING || type >= GCX_EXTERNAL_STRING);
+    return (type == GCX_STRING) ? -1 : (intN) (type - GCX_EXTERNAL_STRING);
+}
+
+static uint32
+MapGCFlagsToTraceKind(uintN flags)
+{
+    uint32 type;
+
+    type = flags & GCF_TYPEMASK;
+    JS_ASSERT(type != GCX_DOUBLE);
+    JS_ASSERT(type < GCX_NTYPES);
+    return (type < GCX_EXTERNAL_STRING) ? type : JSTRACE_STRING;
+}
+
+JS_FRIEND_API(uint32)
+js_GetGCThingTraceKind(void *thing)
+{
+    JSGCArenaInfo *a;
+    uint32 index;
+
+    a = THING_TO_ARENA(thing);
+    if (!a->list)
+        return JSTRACE_DOUBLE;
+
+    index = THING_TO_INDEX(thing, a->list->thingSize);
+    return MapGCFlagsToTraceKind(*THING_FLAGP(a, index));
+}
+
+JSRuntime*
+js_GetGCStringRuntime(JSString *str)
+{
+    JSGCArenaList *list;
+
+    list = THING_TO_ARENA(str)->list;
+
+    JS_ASSERT(list->thingSize == sizeof(JSGCThing));
+    JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
+
+    return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
+}
+
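js_GetGCStringRuntime recovers the owning JSRuntime from an interior pointer by subtracting the field's offset, the classic container-of idiom; the asserts above confirm the string list really is at index 0, which the subtraction depends on. A small sketch of the idiom under illustrative struct names:

    #include <stddef.h>

    struct List { int thingSize; };

    struct Runtime {
        int flags;
        struct List arenaList[4];
    };

    /* Recover the Runtime from a pointer to the first arenaList element;
       this only works for index 0, mirroring the assert in the code above. */
    static struct Runtime *
    runtime_from_list(struct List *list)
    {
        return (struct Runtime *)
               ((unsigned char *) list - offsetof(struct Runtime, arenaList));
    }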
+JSBool
+js_IsAboutToBeFinalized(JSContext *cx, void *thing)
+{
+    JSGCArenaInfo *a;
+    uint32 index, flags;
+
+    a = THING_TO_ARENA(thing);
+    if (!a->list) {
+        /*
+         * Check if the arena has no marked doubles. In that case the bitmap
+         * with the mark flags contains all garbage, as it is initialized
+         * only when marking the first double in the arena.
+         */
+        if (!a->u.hasMarkedDoubles)
+            return JS_TRUE;
+        index = DOUBLE_THING_TO_INDEX(thing);
+        return !IsMarkedDouble(a, index);
+    }
+    index = THING_TO_INDEX(thing, a->list->thingSize);
+    flags = *THING_FLAGP(a, index);
+    return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
+}
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCRootHashEntry {
+    JSDHashEntryHdr hdr;
+    void            *root;
+    const char      *name;
+} JSGCRootHashEntry;
+
+/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
+#define GC_ROOTS_SIZE 256
+
+#if CHUNKED_ARENA_ALLOCATION
+
+/*
+ * For a CPU with extremely large pages, using them for GC things wastes
+ * too much memory.
+ */
+# define GC_ARENAS_PER_CPU_PAGE_LIMIT JS_BIT(18 - GC_ARENA_SHIFT)
+
+JS_STATIC_ASSERT(GC_ARENAS_PER_CPU_PAGE_LIMIT <= NO_FREE_ARENAS);
+
+#endif
+
+JSBool
+js_InitGC(JSRuntime *rt, uint32 maxbytes)
+{
+#if JS_GC_USE_MMAP
+    if (js_gcArenasPerChunk == 0) {
+        size_t cpuPageSize, arenasPerPage;
+# if defined(XP_WIN)
+        SYSTEM_INFO si;
+
+        GetSystemInfo(&si);
+        cpuPageSize = si.dwPageSize;
+
+# elif defined(XP_UNIX) || defined(XP_BEOS)
+        cpuPageSize = (size_t) sysconf(_SC_PAGESIZE);
+# else
+# error "Not implemented"
+# endif
+        /* cpuPageSize is a power of 2. */
+        JS_ASSERT((cpuPageSize & (cpuPageSize - 1)) == 0);
+        arenasPerPage = cpuPageSize >> GC_ARENA_SHIFT;
+#ifdef DEBUG
+        if (arenasPerPage == 0) {
+            fprintf(stderr,
+"JS engine warning: the size of the CPU page, %u bytes, is too low to use\n"
+"paged allocation for the garbage collector. Please report this.\n",
+                    (unsigned) cpuPageSize);
+        }
+#endif
+        if (arenasPerPage - 1 <= (size_t) (GC_ARENAS_PER_CPU_PAGE_LIMIT - 1)) {
+            /*
+             * Use at least 4 GC arenas per paged allocation chunk to minimize
+             * the overhead of mmap/VirtualAlloc.
+             */
+            js_gcUseMmap = JS_TRUE;
+            js_gcArenasPerChunk = JS_MAX((uint32) arenasPerPage, 4);
+        } else {
+            js_gcUseMmap = JS_FALSE;
+            js_gcArenasPerChunk = 7;
+        }
+    }
+    JS_ASSERT(1 <= js_gcArenasPerChunk &&
+              js_gcArenasPerChunk <= NO_FREE_ARENAS);
+#endif
+
+    InitGCArenaLists(rt);
+    if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
+                           sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
+        rt->gcRootsHash.ops = NULL;
+        return JS_FALSE;
+    }
+    rt->gcLocksHash = NULL;     /* create lazily */
+
+    /*
+     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
+     * for default backward API compatibility.
+     */
+    rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
+    rt->gcStackPoolLifespan = 30000;
+
+    METER(memset(&rt->gcStats, 0, sizeof rt->gcStats));
+    return JS_TRUE;
+}
+
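js_InitGC assumes the CPU page size is a power of two before dividing it down to arenas; the assertion above uses the standard single-set-bit trick, shown here on its own:

    #include <stddef.h>

    /* Nonzero iff x is a power of two: a power of two has exactly one set
       bit, so x & (x - 1) clears it and yields zero. */
    static int
    is_power_of_two(size_t x)
    {
        return x != 0 && (x & (x - 1)) == 0;
    }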
+#ifdef JS_GCMETER
+
+static void
+UpdateArenaStats(JSGCArenaStats *st, uint32 nlivearenas, uint32 nkilledArenas,
+                 uint32 nthings)
+{
+    size_t narenas;
+
+    narenas = nlivearenas + nkilledArenas;
+    JS_ASSERT(narenas >= st->livearenas);
+
+    st->newarenas = narenas - st->livearenas;
+    st->narenas = narenas;
+    st->livearenas = nlivearenas;
+    if (st->maxarenas < narenas)
+        st->maxarenas = narenas;
+    st->totalarenas += narenas;
+
+    st->nthings = nthings;
+    if (st->maxthings < nthings)
+        st->maxthings = nthings;
+    st->totalthings += nthings;
+}
+
+JS_FRIEND_API(void)
+js_DumpGCStats(JSRuntime *rt, FILE *fp)
+{
+    int i;
+    size_t sumArenas, sumTotalArenas;
+    size_t sumThings, sumMaxThings;
+    size_t sumThingSize, sumTotalThingSize;
+    size_t sumArenaCapacity, sumTotalArenaCapacity;
+    JSGCArenaStats *st;
+    size_t thingSize, thingsPerArena;
+    size_t sumAlloc, sumLocalAlloc, sumFail, sumRetry;
+
+    fprintf(fp, "\nGC allocation statistics:\n");
+
+#define UL(x)       ((unsigned long)(x))
+#define ULSTAT(x)   UL(rt->gcStats.x)
+#define PERCENT(x,y)  (100.0 * (double) (x) / (double) (y))
+
+    sumArenas = 0;
+    sumTotalArenas = 0;
+    sumThings = 0;
+    sumMaxThings = 0;
+    sumThingSize = 0;
+    sumTotalThingSize = 0;
+    sumArenaCapacity = 0;
+    sumTotalArenaCapacity = 0;
+    sumAlloc = 0;
+    sumLocalAlloc = 0;
+    sumFail = 0;
+    sumRetry = 0;
+    for (i = -1; i < (int) GC_NUM_FREELISTS; i++) {
+        if (i == -1) {
+            thingSize = sizeof(jsdouble);
+            thingsPerArena = DOUBLES_PER_ARENA;
+            st = &rt->gcStats.doubleArenaStats;
+            fprintf(fp,
+                    "Arena list for double values (%lu doubles per arena):",
+                    UL(thingsPerArena));
+        } else {
+            thingSize = rt->gcArenaList[i].thingSize;
+            thingsPerArena = THINGS_PER_ARENA(thingSize);
+            st = &rt->gcStats.arenaStats[i];
+            fprintf(fp,
+                    "Arena list %d (thing size %lu, %lu things per arena):",
+                    i, UL(GC_FREELIST_NBYTES(i)), UL(thingsPerArena));
+        }
+        if (st->maxarenas == 0) {
+            fputs(" NEVER USED\n", fp);
+            continue;
+        }
+        putc('\n', fp);
+        fprintf(fp, "           arenas before GC: %lu\n", UL(st->narenas));
+        fprintf(fp, "       new arenas before GC: %lu (%.1f%%)\n",
+                UL(st->newarenas), PERCENT(st->newarenas, st->narenas));
+        fprintf(fp, "            arenas after GC: %lu (%.1f%%)\n",
+                UL(st->livearenas), PERCENT(st->livearenas, st->narenas));
+        fprintf(fp, "                 max arenas: %lu\n", UL(st->maxarenas));
+        fprintf(fp, "                     things: %lu\n", UL(st->nthings));
+        fprintf(fp, "        GC cell utilization: %.1f%%\n",
+                PERCENT(st->nthings, thingsPerArena * st->narenas));
+        fprintf(fp, "   average cell utilization: %.1f%%\n",
+                PERCENT(st->totalthings, thingsPerArena * st->totalarenas));
+        fprintf(fp, "                 max things: %lu\n", UL(st->maxthings));
+        fprintf(fp, "             alloc attempts: %lu\n", UL(st->alloc));
+        fprintf(fp, "        alloc without locks: %lu (%.1f%%)\n",
+                UL(st->localalloc), PERCENT(st->localalloc, st->alloc));
+        sumArenas += st->narenas;
+        sumTotalArenas += st->totalarenas;
+        sumThings += st->nthings;
+        sumMaxThings += st->maxthings;
+        sumThingSize += thingSize * st->nthings;
+        sumTotalThingSize += thingSize * st->totalthings;
+        sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
+        sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
+        sumAlloc += st->alloc;
+        sumLocalAlloc += st->localalloc;
+        sumFail += st->fail;
+        sumRetry += st->retry;
+    }
+    fprintf(fp, "TOTAL STATS:\n");
+    fprintf(fp, "            bytes allocated: %lu\n", UL(rt->gcBytes));
+    fprintf(fp, "            total GC arenas: %lu\n", UL(sumArenas));
+    fprintf(fp, "            total GC things: %lu\n", UL(sumThings));
+    fprintf(fp, "        max total GC things: %lu\n", UL(sumMaxThings));
+    fprintf(fp, "        GC cell utilization: %.1f%%\n",
+            PERCENT(sumThingSize, sumArenaCapacity));
+    fprintf(fp, "   average cell utilization: %.1f%%\n",
+            PERCENT(sumTotalThingSize, sumTotalArenaCapacity));
+    fprintf(fp, "allocation retries after GC: %lu\n", UL(sumRetry));
+    fprintf(fp, "             alloc attempts: %lu\n", UL(sumAlloc));
+    fprintf(fp, "        alloc without locks: %lu (%.1f%%)\n",
+            UL(sumLocalAlloc), PERCENT(sumLocalAlloc, sumAlloc));
+    fprintf(fp, "        allocation failures: %lu\n", UL(sumFail));
+    fprintf(fp, "         things born locked: %lu\n", ULSTAT(lockborn));
+    fprintf(fp, "           valid lock calls: %lu\n", ULSTAT(lock));
+    fprintf(fp, "         valid unlock calls: %lu\n", ULSTAT(unlock));
+    fprintf(fp, "       mark recursion depth: %lu\n", ULSTAT(depth));
+    fprintf(fp, "     maximum mark recursion: %lu\n", ULSTAT(maxdepth));
+    fprintf(fp, "     mark C recursion depth: %lu\n", ULSTAT(cdepth));
+    fprintf(fp, "   maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
+    fprintf(fp, "      delayed tracing calls: %lu\n", ULSTAT(untraced));
+#ifdef DEBUG
+    fprintf(fp, "      max trace later count: %lu\n", ULSTAT(maxuntraced));
+#endif
+    fprintf(fp, "   maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
+    fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
+    fprintf(fp, "  thing arenas freed so far: %lu\n", ULSTAT(afree));
+    fprintf(fp, "     stack segments scanned: %lu\n", ULSTAT(stackseg));
+    fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
+    fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
+    fprintf(fp, "    max reachable closeable: %lu\n", ULSTAT(maxnclose));
+    fprintf(fp, "      scheduled close hooks: %lu\n", ULSTAT(closelater));
+    fprintf(fp, "  max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
+
+#undef UL
+#undef ULSTAT
+#undef PERCENT
+
+#ifdef JS_ARENAMETER
+    JS_DumpArenaStats(fp);
+#endif
+}
+#endif
+
+#ifdef DEBUG
+static void
+CheckLeakedRoots(JSRuntime *rt);
+#endif
+
+void
+js_FinishGC(JSRuntime *rt)
+{
+#ifdef JS_ARENAMETER
+    JS_DumpArenaStats(stdout);
+#endif
+#ifdef JS_GCMETER
+    js_DumpGCStats(rt, stdout);
+#endif
+
+    FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
+    FinishGCArenaLists(rt);
+
+    if (rt->gcRootsHash.ops) {
+#ifdef DEBUG
+        CheckLeakedRoots(rt);
+#endif
+        JS_DHashTableFinish(&rt->gcRootsHash);
+        rt->gcRootsHash.ops = NULL;
+    }
+    if (rt->gcLocksHash) {
+        JS_DHashTableDestroy(rt->gcLocksHash);
+        rt->gcLocksHash = NULL;
+    }
+}
+
+JSBool
+js_AddRoot(JSContext *cx, void *rp, const char *name)
+{
+    JSBool ok = js_AddRootRT(cx->runtime, rp, name);
+    if (!ok)
+        JS_ReportOutOfMemory(cx);
+    return ok;
+}
+
+JSBool
+js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
+{
+    JSBool ok;
+    JSGCRootHashEntry *rhe;
+
+    /*
+     * Due to the long-standing, but now removed, use of rt->gcLock across the
+     * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
+     * properly with a racing GC, without calling JS_AddRoot from a request.
+     * We have to preserve API compatibility here, now that we avoid holding
+     * rt->gcLock across the mark phase (including the root hashtable mark).
+     *
+     * If the GC is running and we're called on another thread, wait for this
+     * GC activation to finish. We can safely wait here (in the case where we
+     * are called within a request on another thread's context) without fear
+     * of deadlock because the GC doesn't set rt->gcRunning until after it has
+     * waited for all active requests to end.
+     */
+    JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+        do {
+            JS_AWAIT_GC_DONE(rt);
+        } while (rt->gcLevel > 0);
+    }
+#endif
+    rhe = (JSGCRootHashEntry *)
+          JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_ADD);
+    if (rhe) {
+        rhe->root = rp;
+        rhe->name = name;
+        ok = JS_TRUE;
+    } else {
+        ok = JS_FALSE;
+    }
+    JS_UNLOCK_GC(rt);
+    return ok;
+}
+
+JSBool
+js_RemoveRoot(JSRuntime *rt, void *rp)
+{
+    /*
+     * Due to the JS_RemoveRootRT API, we may be called outside of a request.
+     * Same synchronization drill as above in js_AddRoot.
+     */
+    JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+    JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+    if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+        do {
+            JS_AWAIT_GC_DONE(rt);
+        } while (rt->gcLevel > 0);
+    }
+#endif
+    (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
+    rt->gcPoke = JS_TRUE;
+    JS_UNLOCK_GC(rt);
+    return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
+{
+    uint32 *leakedroots = (uint32 *)arg;
+    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+    (*leakedroots)++;
+    fprintf(stderr,
+            "JS engine warning: leaking GC root \'%s\' at %p\n",
+            rhe->name ? (char *)rhe->name : "", rhe->root);
+
+    return JS_DHASH_NEXT;
+}
+
+static void
+CheckLeakedRoots(JSRuntime *rt)
+{
+    uint32 leakedroots = 0;
+
+    /* Warn (but don't assert) debug builds of any remaining roots. */
+    JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
+                           &leakedroots);
+    if (leakedroots > 0) {
+        if (leakedroots == 1) {
+            fprintf(stderr,
+"JS engine warning: 1 GC root remains after destroying the JSRuntime at %p.\n"
+"                   This root may point to freed memory. Objects reachable\n"
+"                   through it have not been finalized.\n",
+                    (void *) rt);
+        } else {
+            fprintf(stderr,
+"JS engine warning: %lu GC roots remain after destroying the JSRuntime at %p.\n"
+"                   These roots may point to freed memory. Objects reachable\n"
+"                   through them have not been finalized.\n",
+                    (unsigned long) leakedroots, (void *) rt);
+        }
+    }
+}
+
+typedef struct NamedRootDumpArgs {
+    void (*dump)(const char *name, void *rp, void *data);
+    void *data;
+} NamedRootDumpArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+                     void *arg)
+{
+    NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
+    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+    if (rhe->name)
+        args->dump(rhe->name, rhe->root, args->data);
+    return JS_DHASH_NEXT;
+}
+
+void
+js_DumpNamedRoots(JSRuntime *rt,
+                  void (*dump)(const char *name, void *rp, void *data),
+                  void *data)
+{
+    NamedRootDumpArgs args;
+
+    args.dump = dump;
+    args.data = data;
+    JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
+}
+
+#endif /* DEBUG */
+
+typedef struct GCRootMapArgs {
+    JSGCRootMapFun map;
+    void *data;
+} GCRootMapArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+                 void *arg)
+{
+    GCRootMapArgs *args = (GCRootMapArgs *) arg;
+    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+    intN mapflags;
+    int op;
+
+    mapflags = args->map(rhe->root, rhe->name, args->data);
+
+#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT &&                                    \
+    JS_MAP_GCROOT_STOP == JS_DHASH_STOP &&                                    \
+    JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
+    op = (JSDHashOperator)mapflags;
+#else
+    op = JS_DHASH_NEXT;
+    if (mapflags & JS_MAP_GCROOT_STOP)
+        op |= JS_DHASH_STOP;
+    if (mapflags & JS_MAP_GCROOT_REMOVE)
+        op |= JS_DHASH_REMOVE;
+#endif
+
+    return (JSDHashOperator) op;
+}
+
+uint32
+js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
+{
+    GCRootMapArgs args;
+    uint32 rv;
+
+    args.map = map;
+    args.data = data;
+    JS_LOCK_GC(rt);
+    rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
+    JS_UNLOCK_GC(rt);
+    return rv;
+}
+
+JSBool
+js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
+{
+    JSRuntime *rt;
+    JSBool ok;
+
+    rt = cx->runtime;
+    JS_ASSERT(!rt->gcRunning);
+
+    JS_LOCK_GC(rt);
+    ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
+    JS_UNLOCK_GC(rt);
+    return ok;
+}
+
+static void
+CloseNativeIterators(JSContext *cx)
+{
+    JSRuntime *rt;
+    size_t count, newCount, i;
+    void **array;
+    JSObject *obj;
+
+    rt = cx->runtime;
+    count = rt->gcIteratorTable.count;
+    array = rt->gcIteratorTable.array;
+
+    newCount = 0;
+    for (i = 0; i != count; ++i) {
+        obj = (JSObject *)array[i];
+        if (js_IsAboutToBeFinalized(cx, obj))
+            js_CloseNativeIterator(cx, obj);
+        else
+            array[newCount++] = obj;
+    }
+    ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
+}
+
+#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
+#define DEBUG_gchist
+#endif
+
+#ifdef DEBUG_gchist
+#define NGCHIST 64
+
+static struct GCHist {
+    JSBool      lastDitch;
+    JSGCThing   *freeList;
+} gchist[NGCHIST];
+
+unsigned gchpos = 0;
+#endif
+
+void *
+js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
+{
+    JSRuntime *rt;
+    uintN flindex;
+    JSBool doGC;
+    JSGCThing *thing;
+    uint8 *flagp;
+    JSGCArenaList *arenaList;
+    JSGCArenaInfo *a;
+    uintN thingsLimit;
+    JSLocalRootStack *lrs;
+#ifdef JS_GCMETER
+    JSGCArenaStats *astats;
+#endif
+#ifdef JS_THREADSAFE
+    JSBool gcLocked;
+    uintN localMallocBytes;
+    JSGCThing **flbase, **lastptr;
+    JSGCThing *tmpthing;
+    uint8 *tmpflagp;
+    uintN maxFreeThings;        /* max to take from the global free list */
+#endif
+
+    JS_ASSERT((flags & GCF_TYPEMASK) != GCX_DOUBLE);
+    rt = cx->runtime;
+    nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
+    flindex = GC_FREELIST_INDEX(nbytes);
+
+    /* Updates of metering counters here may not be thread-safe. */
+    METER(astats = &cx->runtime->gcStats.arenaStats[flindex]);
+    METER(astats->alloc++);
+
+#ifdef JS_THREADSAFE
+    gcLocked = JS_FALSE;
+    JS_ASSERT(cx->thread);
+    flbase = cx->thread->gcFreeLists;
+    JS_ASSERT(flbase);
+    thing = flbase[flindex];
+    localMallocBytes = cx->thread->gcMallocBytes;
+    if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
+        flagp = thing->flagp;
+        flbase[flindex] = thing->next;
+        METER(astats->localalloc++);
+        goto success;
+    }
+
+    JS_LOCK_GC(rt);
+    gcLocked = JS_TRUE;
+
+    /* Transfer thread-local counter to global one. */
+    if (localMallocBytes != 0) {
+        cx->thread->gcMallocBytes = 0;
+        if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
+            rt->gcMallocBytes = rt->gcMaxMallocBytes;
+        else
+            rt->gcMallocBytes += localMallocBytes;
+    }
+#endif
+    JS_ASSERT(!rt->gcRunning);
+    if (rt->gcRunning) {
+        METER(rt->gcStats.finalfail++);
+        JS_UNLOCK_GC(rt);
+        return NULL;
+    }
+
+    doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke);
+#ifdef JS_GC_ZEAL
+    doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
+#endif
+
+    arenaList = &rt->gcArenaList[flindex];
+    for (;;) {
+        if (doGC) {
+            /*
+             * Keep rt->gcLock across the call into js_GC so we don't starve
+             * and lose to racing threads who deplete the heap just after
+             * js_GC has replenished it (or has synchronized with a racing
+             * GC that collected a bunch of garbage). This unfair scheduling
+             * can happen on certain operating systems. For the gory details,
+             * see bug 162779 at https://bugzilla.mozilla.org/.
+             */
+            js_GC(cx, GC_LAST_DITCH);
+            METER(astats->retry++);
+        }
+
+        /* Try to get thing from the free list. */
+        thing = arenaList->freeList;
+        if (thing) {
+            arenaList->freeList = thing->next;
+            flagp = thing->flagp;
+            JS_ASSERT(*flagp & GCF_FINAL);
+
+#ifdef JS_THREADSAFE
+            /*
+             * Refill the local free list by taking several things from the
+             * global free list unless we are still at the
+             * rt->gcMaxMallocBytes barrier or the free list is already
+             * populated. The former happens when GC is canceled due to
+             * !gcCallback(cx, JSGC_BEGIN) or no gcPoke. The latter is caused
+             * by allocating new things in gcCallback(cx, JSGC_END).
+             */
+            if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+                break;
+            tmpthing = arenaList->freeList;
+            if (tmpthing) {
+                maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+                do {
+                    if (!tmpthing->next)
+                        break;
+                    tmpthing = tmpthing->next;
+                } while (--maxFreeThings != 0);
+
+                flbase[flindex] = arenaList->freeList;
+                arenaList->freeList = tmpthing->next;
+                tmpthing->next = NULL;
+            }
+#endif
+            break;
+        }
+
+        /*
+         * Try to allocate things from the last arena. If it is fully used,
+         * check if we can allocate a new one and, if we cannot, consider
+         * doing a "last ditch" GC unless already tried.
+         */
+        thingsLimit = THINGS_PER_ARENA(nbytes);
+        if (arenaList->lastCount != thingsLimit) {
+            JS_ASSERT(arenaList->lastCount < thingsLimit);
+            a = arenaList->last;
+        } else {
+            a = NewGCArena(rt);
+            if (!a) {
+                if (doGC)
+                    goto fail;
+                doGC = JS_TRUE;
+                continue;
+            }
+            a->list = arenaList;
+            a->prev = arenaList->last;
+            a->prevUntracedPage = 0;
+            a->u.untracedThings = 0;
+            arenaList->last = a;
+            arenaList->lastCount = 0;
+        }
+
+        flagp = THING_FLAGP(a, arenaList->lastCount);
+        thing = FLAGP_TO_THING(flagp, nbytes);
+        arenaList->lastCount++;
+
+#ifdef JS_THREADSAFE
+        /*
+         * Refill the local free list by taking free things from the last
+         * arena. Prefer to order free things by ascending address in the
+         * (unscientific) hope of better cache locality.
+         */
+        if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+            break;
+        lastptr = &flbase[flindex];
+        maxFreeThings = thingsLimit - arenaList->lastCount;
+        if (maxFreeThings > MAX_THREAD_LOCAL_THINGS)
+            maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+        while (maxFreeThings != 0) {
+            --maxFreeThings;
+
+            tmpflagp = THING_FLAGP(a, arenaList->lastCount);
+            tmpthing = FLAGP_TO_THING(tmpflagp, nbytes);
+            arenaList->lastCount++;
+            tmpthing->flagp = tmpflagp;
+            *tmpflagp = GCF_FINAL;      /* signifying that thing is free */
+
+            *lastptr = tmpthing;
+            lastptr = &tmpthing->next;
+        }
+        *lastptr = NULL;
+#endif
+        break;
+    }
+
+    /* We successfully allocated the thing. */
+#ifdef JS_THREADSAFE
+  success:
+#endif
+    lrs = cx->localRootStack;
+    if (lrs) {
+        /*
+         * If we're in a local root scope, don't set newborn[type] at all, to
+         * avoid entraining garbage from it for an unbounded amount of time
+         * on this context. A caller will leave the local root scope and pop
+         * this reference, allowing thing to be GC'd if it has no other refs.
+         * See JS_EnterLocalRootScope and related APIs.
+         */
+        if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
+            /*
+             * When we fail for a thing allocated through the tail of the last
+             * arena, thing's flag byte is not initialized. So to prevent GC
+             * accessing the uninitialized flags during the finalization, we
+             * always mark the thing as final. See bug 337407.
+             */
+            *flagp = GCF_FINAL;
+            goto fail;
+        }
+    } else {
+        /*
+         * No local root scope, so we're stuck with the old, fragile model of
+         * depending on a pigeon-hole newborn per type per context.
+         */
+        cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
+    }
+
+    /* We can't fail now, so update flags. */
+    *flagp = (uint8)flags;
+
+#ifdef DEBUG_gchist
+    gchist[gchpos].lastDitch = doGC;
+    gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
+    if (++gchpos == NGCHIST)
+        gchpos = 0;
+#endif
+
+    /* This is not thread-safe for thread-local allocations. */
+    METER_IF(flags & GCF_LOCK, rt->gcStats.lockborn++);
+
+#ifdef JS_THREADSAFE
+    if (gcLocked)
+        JS_UNLOCK_GC(rt);
+#endif
+    JS_COUNT_OPERATION(cx, JSOW_ALLOCATION);
+    return thing;
+
+  fail:
+#ifdef JS_THREADSAFE
+    if (gcLocked)
+        JS_UNLOCK_GC(rt);
+#endif
+    METER(astats->fail++);
+    JS_ReportOutOfMemory(cx);
+    return NULL;
+}
+
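Under JS_THREADSAFE the allocator above amortizes locking by moving a bounded run of free things from the shared free list onto a per-thread list while the GC lock is held. The core of that transfer, sketched with an illustrative node type and bound (the caller is assumed to hold whatever lock protects *global):

    typedef struct Thing { struct Thing *next; } Thing;

    enum { MAX_LOCAL_THINGS = 8 };    /* illustrative bound */

    /* Detach up to MAX_LOCAL_THINGS nodes from *global and hand the run to
       the thread-local list *local, if it is empty. */
    static void
    refill_local_list(Thing **global, Thing **local)
    {
        Thing *t = *global;
        int n = MAX_LOCAL_THINGS;

        if (!t || *local)
            return;              /* nothing to take, or already populated */
        while (t->next && --n != 0)
            t = t->next;         /* t: last node of the run we keep */
        *local = *global;
        *global = t->next;
        t->next = NULL;
    }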
+static JSGCDoubleCell *
+RefillDoubleFreeList(JSContext *cx)
+{
+    JSRuntime *rt;
+    jsbitmap *doubleFlags, usedBits;
+    JSBool doGC;
+    JSGCArenaInfo *a;
+    uintN bit, index;
+    JSGCDoubleCell *cell, *list, *lastcell;
+
+    JS_ASSERT(!cx->doubleFreeList);
+
+    rt = cx->runtime;
+    JS_LOCK_GC(rt);
+
+    JS_ASSERT(!rt->gcRunning);
+    if (rt->gcRunning) {
+        METER(rt->gcStats.finalfail++);
+        JS_UNLOCK_GC(rt);
+        return NULL;
+    }
+
+    doGC = rt->gcMallocBytes >= rt->gcMaxMallocBytes && rt->gcPoke;
+#ifdef JS_GC_ZEAL
+    doGC = doGC || rt->gcZeal >= 2 || (rt->gcZeal >= 1 && rt->gcPoke);
+#endif
+    if (doGC)
+        goto do_gc;
+
+    /*
+     * Loop until we find a flag bitmap byte with unset bits indicating free
+     * double cells, then set all bits as used and put the cells to the free
+     * list for the current context.
+     */
+    doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
+    for (;;) {
+        if (((jsuword) doubleFlags & GC_ARENA_MASK) ==
+            ARENA_INFO_OFFSET) {
+            if (doubleFlags == DOUBLE_BITMAP_SENTINEL ||
+                !((JSGCArenaInfo *) doubleFlags)->prev) {
+                a = NewGCArena(rt);
+                if (!a) {
+                    if (doGC) {
+                        METER(rt->gcStats.doubleArenaStats.fail++);
+                        JS_UNLOCK_GC(rt);
+                        JS_ReportOutOfMemory(cx);
+                        return NULL;
+                    }
+                    doGC = JS_TRUE;
+                  do_gc:
+                    js_GC(cx, GC_LAST_DITCH);
+                    METER(rt->gcStats.doubleArenaStats.retry++);
+                    doubleFlags = rt->gcDoubleArenaList.nextDoubleFlags;
+                    continue;
+                }
+                a->list = NULL;
+                a->prev = NULL;
+                if (doubleFlags == DOUBLE_BITMAP_SENTINEL) {
+                    JS_ASSERT(!rt->gcDoubleArenaList.first);
+                    rt->gcDoubleArenaList.first = a;
+                } else {
+                    JS_ASSERT(rt->gcDoubleArenaList.first);
+                    ((JSGCArenaInfo *) doubleFlags)->prev = a;
+                }
+                ClearDoubleArenaFlags(a);
+                doubleFlags = DOUBLE_ARENA_BITMAP(a);
+                break;
+            }
+            doubleFlags =
+                DOUBLE_ARENA_BITMAP(((JSGCArenaInfo *) doubleFlags)->prev);
+        }
+
+        /*
+         * When doubleFlags points to the last bitmap word in the arena, its
+         * high bits correspond to nonexistent cells. ClearDoubleArenaFlags
+         * sets such bits to 1. Thus even for this last word its bit is unset
+         * iff the corresponding cell exists and is free.
+         */
+        if (*doubleFlags != (jsbitmap) -1)
+            break;
+        ++doubleFlags;
+    }
+
+    rt->gcDoubleArenaList.nextDoubleFlags = doubleFlags + 1;
+    usedBits = *doubleFlags;
+    JS_ASSERT(usedBits != (jsbitmap) -1);
+    *doubleFlags = (jsbitmap) -1;
+    JS_UNLOCK_GC(rt);
+
+    /*
+     * Find the index corresponding to the first bit in *doubleFlags. The last
+     * bit will have "index + JS_BITS_PER_WORD - 1".
+     */
+    index = ((uintN) ((jsuword) doubleFlags & GC_ARENA_MASK) -
+             DOUBLES_ARENA_BITMAP_OFFSET) * JS_BITS_PER_BYTE;
+    cell = (JSGCDoubleCell *) ((jsuword) doubleFlags & ~GC_ARENA_MASK) + index;
+
+    if (usedBits == 0) {
+        /* The common case when all doubles from *doubleFlags are free. */
+        JS_ASSERT(index + JS_BITS_PER_WORD <= DOUBLES_PER_ARENA);
+        list = cell;
+        for (lastcell = cell + JS_BITS_PER_WORD - 1; cell != lastcell; ++cell)
+            cell->link = cell + 1;
+        lastcell->link = NULL;
+    } else {
+        /*
+         * Assemble the free list from free cells from *doubleFlags starting
+         * from the tail. In the loop
+         *
+         *   index + bit >= DOUBLES_PER_ARENA
+         *
+         * when bit is one of the unused bits. We do not check for such bits
+         * explicitly as they must be set and the "if" check filters them out.
+         */
+        JS_ASSERT(index + JS_BITS_PER_WORD <=
+                  DOUBLES_PER_ARENA + UNUSED_DOUBLE_BITMAP_BITS);
+        bit = JS_BITS_PER_WORD;
+        cell += bit;
+        list = NULL;
+        do {
+            --bit;
+            --cell;
+            if (!(((jsbitmap) 1 << bit) & usedBits)) {
+                JS_ASSERT(index + bit < DOUBLES_PER_ARENA);
+                JS_ASSERT_IF(index + bit == DOUBLES_PER_ARENA - 1, !list);
+                cell->link = list;
+                list = cell;
+            }
+        } while (bit != 0);
+    }
+    JS_ASSERT(list);
+    JS_COUNT_OPERATION(cx, JSOW_ALLOCATION * JS_BITS_PER_WORD);
+
+    /*
+     * We delegate assigning cx->doubleFreeList to js_NewDoubleInRootedValue
+     * as it immediately consumes the head of the list.
+     */
+    return list;
+}
+
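RefillDoubleFreeList claims a whole bitmap word at a time: a zero word means every cell it covers is free, otherwise free cells are threaded into a list from the highest bit down so the list comes out in ascending address order. A compact sketch of the second case (the cell layout and 32-bit word width are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct Cell { struct Cell *link; } Cell;

    /* Link every cell whose bit in used_bits is clear into a list, walking
       from the highest bit down so the lowest address ends up at the head. */
    static Cell *
    collect_free_cells(Cell *base, uint32_t used_bits)
    {
        Cell *list = NULL;
        unsigned bit = 32;

        while (bit != 0) {
            --bit;
            if (!(used_bits & ((uint32_t) 1 << bit))) {
                base[bit].link = list;
                list = &base[bit];
            }
        }
        return list;
    }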
+JSBool
+js_NewDoubleInRootedValue(JSContext *cx, jsdouble d, jsval *vp)
+{
+#ifdef JS_GCMETER
+    JSGCArenaStats *astats;
+#endif
+    JSGCDoubleCell *cell;
+
+    /* Updates of metering counters here are not thread-safe. */
+    METER(astats = &cx->runtime->gcStats.doubleArenaStats);
+    METER(astats->alloc++);
+    cell = cx->doubleFreeList;
+    if (!cell) {
+        cell = RefillDoubleFreeList(cx);
+        if (!cell) {
+            METER(astats->fail++);
+            return JS_FALSE;
+        }
+    } else {
+        METER(astats->localalloc++);
+    }
+    cx->doubleFreeList = cell->link;
+    cell->number = d;
+    *vp = DOUBLE_TO_JSVAL(&cell->number);
+    return JS_TRUE;
+}
+
+jsdouble *
+js_NewWeaklyRootedDouble(JSContext *cx, jsdouble d)
+{
+    jsval v;
+    jsdouble *dp;
+
+    if (!js_NewDoubleInRootedValue(cx, d, &v))
+        return NULL;
+
+    JS_ASSERT(JSVAL_IS_DOUBLE(v));
+    dp = JSVAL_TO_DOUBLE(v);
+    if (cx->localRootStack) {
+        if (js_PushLocalRoot(cx, cx->localRootStack, v) < 0)
+            return NULL;
+    } else {
+        cx->weakRoots.newborn[GCX_DOUBLE] = dp;
+    }
+    return dp;
+}
+
+/*
+ * Shallow GC-things can be locked just by setting the GCF_LOCK bit, because
+ * they have no descendants to mark during the GC. Currently the optimization
+ * is only used for non-dependent strings.
+ */
+#define GC_THING_IS_SHALLOW(flagp, thing)                                     \
+    ((flagp) &&                                                               \
+     ((*(flagp) & GCF_TYPEMASK) >= GCX_EXTERNAL_STRING ||                     \
+      ((*(flagp) & GCF_TYPEMASK) == GCX_STRING &&                             \
+       !JSSTRING_IS_DEPENDENT((JSString *) (thing)))))
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCLockHashEntry {
+    JSDHashEntryHdr hdr;
+    const void      *thing;
+    uint32          count;
+} JSGCLockHashEntry;
+
+JSBool
+js_LockGCThingRT(JSRuntime *rt, void *thing)
+{
+    JSBool shallow, ok;
+    uint8 *flagp;
+    JSGCLockHashEntry *lhe;
+
+    if (!thing)
+        return JS_TRUE;
+
+    flagp = GetGCThingFlagsOrNull(thing);
+    JS_LOCK_GC(rt);
+    shallow = GC_THING_IS_SHALLOW(flagp, thing);
+
+    /*
+     * Avoid adding a rt->gcLocksHash entry for shallow things until someone
+     * nests a lock.
+     */
+    if (shallow && !(*flagp & GCF_LOCK)) {
+        *flagp |= GCF_LOCK;
+        METER(rt->gcStats.lock++);
+        ok = JS_TRUE;
+        goto out;
+    }
+
+    if (!rt->gcLocksHash) {
+        rt->gcLocksHash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+                                           sizeof(JSGCLockHashEntry),
+                                           GC_ROOTS_SIZE);
+        if (!rt->gcLocksHash) {
+            ok = JS_FALSE;
+            goto out;
+        }
+    }
+
+    lhe = (JSGCLockHashEntry *)
+          JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
+    if (!lhe) {
+        ok = JS_FALSE;
+        goto out;
+    }
+    if (!lhe->thing) {
+        lhe->thing = thing;
+        lhe->count = 1;
+    } else {
+        JS_ASSERT(lhe->count >= 1);
+        lhe->count++;
+    }
+
+    METER(rt->gcStats.lock++);
+    ok = JS_TRUE;
+  out:
+    JS_UNLOCK_GC(rt);
+    return ok;
+}
+
+JSBool
+js_UnlockGCThingRT(JSRuntime *rt, void *thing)
+{
+    uint8 *flagp;
+    JSBool shallow;
+    JSGCLockHashEntry *lhe;
+
+    if (!thing)
+        return JS_TRUE;
+
+    flagp = GetGCThingFlagsOrNull(thing);
+    JS_LOCK_GC(rt);
+    shallow = GC_THING_IS_SHALLOW(flagp, thing);
+
+    if (shallow && !(*flagp & GCF_LOCK))
+        goto out;
+    if (!rt->gcLocksHash ||
+        (lhe = (JSGCLockHashEntry *)
+               JS_DHashTableOperate(rt->gcLocksHash, thing,
+                                    JS_DHASH_LOOKUP),
+         JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
+        /* Shallow entry is not in the hash -> clear its lock bit. */
+        if (shallow)
+            *flagp &= ~GCF_LOCK;
+        else
+            goto out;
+    } else {
+        if (--lhe->count != 0)
+            goto out;
+        JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
+    }
+
+    rt->gcPoke = JS_TRUE;
+    METER(rt->gcStats.unlock++);
+  out:
+    JS_UNLOCK_GC(rt);
+    return JS_TRUE;
+}
+
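The lock and unlock paths above combine a cheap flag bit for the first, shallow lock with a hash entry that carries a count for nested locks. Stripped to the counting discipline alone (the fixed-size table below is a toy stand-in, not the engine's JSDHashTable):

    #include <stddef.h>

    typedef struct LockEntry {
        const void *thing;       /* NULL when the slot is free */
        unsigned count;
    } LockEntry;

    static LockEntry locks[64];

    static LockEntry *
    find_or_add(const void *thing)
    {
        size_t i, freeslot = 64;
        for (i = 0; i < 64; i++) {
            if (locks[i].thing == thing)
                return &locks[i];
            if (!locks[i].thing && freeslot == 64)
                freeslot = i;
        }
        if (freeslot == 64)
            return NULL;         /* table full: mirrors an OOM failure */
        locks[freeslot].thing = thing;
        locks[freeslot].count = 0;
        return &locks[freeslot];
    }

    static int
    lock_thing(const void *thing)
    {
        LockEntry *e = find_or_add(thing);
        if (!e)
            return 0;
        e->count++;              /* first lock sets 1, nesting adds */
        return 1;
    }

    static void
    unlock_thing(const void *thing)
    {
        LockEntry *e = find_or_add(thing);
        if (e && e->count != 0 && --e->count == 0)
            e->thing = NULL;     /* drop the entry on the last unlock */
    }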
+JS_PUBLIC_API(void)
+JS_TraceChildren(JSTracer *trc, void *thing, uint32 kind)
+{
+    JSObject *obj;
+    size_t nslots, i;
+    jsval v;
+    JSString *str;
+
+    switch (kind) {
+      case JSTRACE_OBJECT:
+        /* If obj has no map, it must be a newborn. */
+        obj = (JSObject *) thing;
+        if (!obj->map)
+            break;
+        if (obj->map->ops->trace) {
+            obj->map->ops->trace(trc, obj);
+        } else {
+            nslots = STOBJ_NSLOTS(obj);
+            for (i = 0; i != nslots; ++i) {
+                v = STOBJ_GET_SLOT(obj, i);
+                if (JSVAL_IS_TRACEABLE(v)) {
+                    JS_SET_TRACING_INDEX(trc, "slot", i);
+                    JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v),
+                                  JSVAL_TRACE_KIND(v));
+                }
+            }
+        }
+        break;
+
+      case JSTRACE_STRING:
+        str = (JSString *)thing;
+        if (JSSTRING_IS_DEPENDENT(str))
+            JS_CALL_STRING_TRACER(trc, JSSTRDEP_BASE(str), "base");
+        break;
+
+#if JS_HAS_XML_SUPPORT
+      case JSTRACE_NAMESPACE:
+        js_TraceXMLNamespace(trc, (JSXMLNamespace *)thing);
+        break;
+
+      case JSTRACE_QNAME:
+        js_TraceXMLQName(trc, (JSXMLQName *)thing);
+        break;
+
+      case JSTRACE_XML:
+        js_TraceXML(trc, (JSXML *)thing);
+        break;
+#endif
+    }
+}
+
+/*
+ * Number of things covered by a single bit of JSGCArenaInfo.u.untracedThings.
+ */
+#define THINGS_PER_UNTRACED_BIT(thingSize)                                    \
+    JS_HOWMANY(THINGS_PER_ARENA(thingSize), JS_BITS_PER_WORD)
+
+static void
+DelayTracingChildren(JSRuntime *rt, uint8 *flagp)
+{
+    JSGCArenaInfo *a;
+    uint32 untracedBitIndex;
+    jsuword bit;
+
+    /*
+     * Things with children to be traced later are marked with
+     * GCF_MARK | GCF_FINAL flags.
+     */
+    JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
+    *flagp |= GCF_FINAL;
+
+    METER(rt->gcStats.untraced++);
+#ifdef DEBUG
+    ++rt->gcTraceLaterCount;
+    METER_UPDATE_MAX(rt->gcStats.maxuntraced, rt->gcTraceLaterCount);
+#endif
+
+    a = FLAGP_TO_ARENA(flagp);
+    untracedBitIndex = FLAGP_TO_INDEX(flagp) /
+                       THINGS_PER_UNTRACED_BIT(a->list->thingSize);
+    JS_ASSERT(untracedBitIndex < JS_BITS_PER_WORD);
+    bit = (jsuword)1 << untracedBitIndex;
+    if (a->u.untracedThings != 0) {
+        JS_ASSERT(rt->gcUntracedArenaStackTop);
+        if (a->u.untracedThings & bit) {
+            /* bit already covers things with children to trace later. */
+            return;
+        }
+        a->u.untracedThings |= bit;
+    } else {
+        /*
+         * The thing is the first thing with not yet traced children in the
+         * whole arena, so push the arena on the stack of arenas with things
+         * to be traced later unless the arena has already been pushed. We
+         * detect that by checking prevUntracedPage, as the field is 0 only
+         * for arenas that have not yet been pushed. To ensure that
+         *
+         *   prevUntracedPage != 0
+         *
+         * even when the stack contains one element, we make prevUntracedPage
+         * for the arena at the bottom point to itself.
+         *
+         * See comments in TraceDelayedChildren.
+         */
+        a->u.untracedThings = bit;
+        if (a->prevUntracedPage == 0) {
+            if (!rt->gcUntracedArenaStackTop) {
+                /* Stack was empty, mark the arena as the bottom element. */
+                a->prevUntracedPage = ARENA_INFO_TO_PAGE(a);
+            } else {
+                JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
+                a->prevUntracedPage =
+                    ARENA_INFO_TO_PAGE(rt->gcUntracedArenaStackTop);
+            }
+            rt->gcUntracedArenaStackTop = a;
+        }
+    }
+    JS_ASSERT(rt->gcUntracedArenaStackTop);
+}
+
static void
|
2337
|
+
TraceDelayedChildren(JSTracer *trc)
|
2338
|
+
{
|
2339
|
+
JSRuntime *rt;
|
2340
|
+
JSGCArenaInfo *a, *aprev;
|
2341
|
+
uint32 thingSize;
|
2342
|
+
uint32 thingsPerUntracedBit;
|
2343
|
+
uint32 untracedBitIndex, thingIndex, indexLimit, endIndex;
|
2344
|
+
JSGCThing *thing;
|
2345
|
+
uint8 *flagp;
|
2346
|
+
|
2347
|
+
rt = trc->context->runtime;
|
2348
|
+
a = rt->gcUntracedArenaStackTop;
|
2349
|
+
if (!a) {
|
2350
|
+
JS_ASSERT(rt->gcTraceLaterCount == 0);
|
2351
|
+
return;
|
2352
|
+
}
|
2353
|
+
|
2354
|
+
for (;;) {
|
2355
|
+
/*
|
2356
|
+
* The following assert verifies that the current arena belongs to the
|
2357
|
+
* untraced stack, since DelayTracingChildren ensures that even for
|
2358
|
+
* stack's bottom prevUntracedPage != 0 but rather points to itself.
|
2359
|
+
*/
|
2360
|
+
JS_ASSERT(a->prevUntracedPage != 0);
|
2361
|
+
JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage != 0);
|
2362
|
+
thingSize = a->list->thingSize;
|
2363
|
+
indexLimit = (a == a->list->last)
|
2364
|
+
? a->list->lastCount
|
2365
|
+
: THINGS_PER_ARENA(thingSize);
|
2366
|
+
thingsPerUntracedBit = THINGS_PER_UNTRACED_BIT(thingSize);
|
2367
|
+
|
2368
|
+
/*
|
2369
|
+
* We cannot use do-while loop here as a->u.untracedThings can be zero
|
2370
|
+
* before the loop as a leftover from the previous iterations. See
|
2371
|
+
* comments after the loop.
|
2372
|
+
*/
|
2373
|
+
while (a->u.untracedThings != 0) {
|
2374
|
+
untracedBitIndex = JS_FLOOR_LOG2W(a->u.untracedThings);
|
2375
|
+
a->u.untracedThings &= ~((jsuword)1 << untracedBitIndex);
|
2376
|
+
thingIndex = untracedBitIndex * thingsPerUntracedBit;
|
2377
|
+
endIndex = thingIndex + thingsPerUntracedBit;
|
2378
|
+
|
2379
|
+
/*
|
2380
|
+
* endIndex can go beyond the last allocated thing as the real
|
2381
|
+
* limit can be "inside" the bit.
|
2382
|
+
*/
|
2383
|
+
if (endIndex > indexLimit)
|
2384
|
+
endIndex = indexLimit;
|
2385
|
+
JS_ASSERT(thingIndex < indexLimit);
|
2386
|
+
|
2387
|
+
do {
|
2388
|
+
/*
|
2389
|
+
* Skip free or already traced things that share the bit
|
2390
|
+
* with untraced ones.
|
2391
|
+
*/
|
2392
|
+
flagp = THING_FLAGP(a, thingIndex);
|
2393
|
+
if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
|
2394
|
+
continue;
|
2395
|
+
*flagp &= ~GCF_FINAL;
|
2396
|
+
#ifdef DEBUG
|
2397
|
+
JS_ASSERT(rt->gcTraceLaterCount != 0);
|
2398
|
+
--rt->gcTraceLaterCount;
|
2399
|
+
#endif
|
2400
|
+
thing = FLAGP_TO_THING(flagp, thingSize);
|
2401
|
+
JS_TraceChildren(trc, thing, MapGCFlagsToTraceKind(*flagp));
|
2402
|
+
} while (++thingIndex != endIndex);
|
2403
|
+
}
|
2404
|
+
|
2405
|
+
/*
|
2406
|
+
* We finished tracing of all things in the the arena but we can only
|
2407
|
+
* pop it from the stack if the arena is the stack's top.
|
2408
|
+
*
|
2409
|
+
* When JS_TraceChildren from the above calls JS_CallTracer that in
|
2410
|
+
* turn on low C stack calls DelayTracingChildren and the latter
|
2411
|
+
* pushes new arenas to the untraced stack, we have to skip popping
|
2412
|
+
* of this arena until it becomes the top of the stack again.
|
2413
|
+
*/
|
2414
|
+
if (a == rt->gcUntracedArenaStackTop) {
|
2415
|
+
aprev = ARENA_PAGE_TO_INFO(a->prevUntracedPage);
|
2416
|
+
a->prevUntracedPage = 0;
|
2417
|
+
if (a == aprev) {
|
2418
|
+
/*
|
2419
|
+
* prevUntracedPage points to itself and we reached the
|
2420
|
+
* bottom of the stack.
|
2421
|
+
*/
|
2422
|
+
break;
|
2423
|
+
}
|
2424
|
+
rt->gcUntracedArenaStackTop = a = aprev;
|
2425
|
+
} else {
|
2426
|
+
a = rt->gcUntracedArenaStackTop;
|
2427
|
+
}
|
2428
|
+
}
|
2429
|
+
JS_ASSERT(rt->gcUntracedArenaStackTop);
|
2430
|
+
JS_ASSERT(rt->gcUntracedArenaStackTop->prevUntracedPage == 0);
|
2431
|
+
rt->gcUntracedArenaStackTop = NULL;
|
2432
|
+
JS_ASSERT(rt->gcTraceLaterCount == 0);
|
2433
|
+
}
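
A minimal standalone sketch (not part of the diff) of the untraced-bit
bookkeeping above: each arena carries one word in which every bit covers a
fixed-size run of things, so delaying a trace costs a single OR and draining
the word re-scans only the covered runs. The constants and the printf are
hypothetical stand-ins for the real THINGS_PER_ARENA/THINGS_PER_UNTRACED_BIT
macros and the JS_TraceChildren calls; JS_FLOOR_LOG2W is modeled by a plain
highest-set-bit scan.

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_WORD    (sizeof(uintptr_t) * 8)
    #define THINGS_PER_ARENA 400u                       /* hypothetical */
    #define THINGS_PER_BIT   ((THINGS_PER_ARENA + BITS_PER_WORD - 1) / BITS_PER_WORD)

    static uintptr_t untraced;      /* the per-arena u.untracedThings word */

    /* DelayTracingChildren: thingIndex has children to trace later. */
    static void delay_tracing(unsigned thingIndex) {
        untraced |= (uintptr_t)1 << (thingIndex / THINGS_PER_BIT);
    }

    /* TraceDelayedChildren: drain the word, re-scanning each covered run. */
    static void trace_delayed(void) {
        while (untraced != 0) {
            unsigned bit = (unsigned)BITS_PER_WORD - 1;
            while (!(untraced & ((uintptr_t)1 << bit)))
                bit--;                          /* highest set bit */
            untraced &= ~((uintptr_t)1 << bit);
            unsigned from = bit * THINGS_PER_BIT;
            unsigned end = from + THINGS_PER_BIT;
            if (end > THINGS_PER_ARENA)         /* limit can be inside the bit */
                end = THINGS_PER_ARENA;
            printf("re-scan things [%u, %u)\n", from, end);
        }
    }

    int main(void) {
        delay_tracing(7);
        delay_tracing(399);
        trace_delayed();    /* prints the two covered runs */
        return 0;
    }
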
+
+JS_PUBLIC_API(void)
+JS_CallTracer(JSTracer *trc, void *thing, uint32 kind)
+{
+    JSContext *cx;
+    JSRuntime *rt;
+    JSGCArenaInfo *a;
+    uintN index;
+    uint8 *flagp;
+
+    JS_ASSERT(thing);
+    JS_ASSERT(JS_IS_VALID_TRACE_KIND(kind));
+    JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
+
+    if (!IS_GC_MARKING_TRACER(trc)) {
+        trc->callback(trc, thing, kind);
+        goto out;
+    }
+
+    cx = trc->context;
+    rt = cx->runtime;
+    JS_ASSERT(rt->gcMarkingTracer == trc);
+    JS_ASSERT(rt->gcLevel > 0);
+
+    /*
+     * Optimize for string and double as their size is known and their tracing
+     * is not recursive.
+     */
+    switch (kind) {
+      case JSTRACE_DOUBLE:
+        a = THING_TO_ARENA(thing);
+        JS_ASSERT(!a->list);
+        if (!a->u.hasMarkedDoubles) {
+            ClearDoubleArenaFlags(a);
+            a->u.hasMarkedDoubles = JS_TRUE;
+        }
+        index = DOUBLE_THING_TO_INDEX(thing);
+        JS_SET_BIT(DOUBLE_ARENA_BITMAP(a), index);
+        goto out;
+
+      case JSTRACE_STRING:
+        for (;;) {
+            flagp = THING_TO_FLAGP(thing, sizeof(JSGCThing));
+            JS_ASSERT((*flagp & GCF_FINAL) == 0);
+            JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
+            if (!JSSTRING_IS_DEPENDENT((JSString *) thing)) {
+                *flagp |= GCF_MARK;
+                goto out;
+            }
+            if (*flagp & GCF_MARK)
+                goto out;
+            *flagp |= GCF_MARK;
+            thing = JSSTRDEP_BASE((JSString *) thing);
+        }
+        /* NOTREACHED */
+    }
+
+    flagp = GetGCThingFlags(thing);
+    JS_ASSERT(kind == MapGCFlagsToTraceKind(*flagp));
+    if (*flagp & GCF_MARK)
+        goto out;
+
+    /*
+     * We check for non-final flag only if mark is unset as
+     * DelayTracingChildren uses the flag. See comments in the function.
+     */
+    JS_ASSERT(*flagp != GCF_FINAL);
+    *flagp |= GCF_MARK;
+    if (!cx->insideGCMarkCallback) {
+        /*
+         * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
+         * uses the non-recursive code that otherwise would be called only on
+         * a low C stack condition.
+         */
+#ifdef JS_GC_ASSUME_LOW_C_STACK
+# define RECURSION_TOO_DEEP() JS_TRUE
+#else
+        int stackDummy;
+# define RECURSION_TOO_DEEP() (!JS_CHECK_STACK_SIZE(cx, stackDummy))
+#endif
+        if (RECURSION_TOO_DEEP())
+            DelayTracingChildren(rt, flagp);
+        else
+            JS_TraceChildren(trc, thing, kind);
+    } else {
+        /*
+         * For API compatibility we allow for the callback to assume that
+         * after it calls JS_MarkGCThing for the last time, the callback can
+         * start to finalize its own objects that are only referenced by
+         * unmarked GC things.
+         *
+         * Since we do not know which call from inside the callback is the
+         * last, we ensure that children of all marked things are traced and
+         * call TraceDelayedChildren(trc) after tracing the thing.
+         *
+         * As TraceDelayedChildren unconditionally invokes JS_TraceChildren
+         * for the things with untraced children, calling DelayTracingChildren
+         * is useless here. Hence we always trace thing's children even with a
+         * low native stack.
+         */
+        cx->insideGCMarkCallback = JS_FALSE;
+        JS_TraceChildren(trc, thing, kind);
+        TraceDelayedChildren(trc);
+        cx->insideGCMarkCallback = JS_TRUE;
+    }
+
+  out:
+#ifdef DEBUG
+    trc->debugPrinter = NULL;
+    trc->debugPrintArg = NULL;
+#endif
+    return;     /* to avoid out: right_curl when DEBUG is not defined */
+}
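
A sketch, not part of the diff: because JS_CallTracer forwards to
trc->callback whenever trc is not the GC's marking tracer, an embedder can
drive the same hooks to inspect the live heap. Only JSTracer, JS_TRACER_INIT,
JSTRACE_LIMIT, JS_TraceChildren, and the callback signature are taken from
the code above; the header name and the idea of a "counting" walker are
assumptions of this example.

    #include "jsapi.h"

    static uint32 thingCounts[JSTRACE_LIMIT];

    static void
    CountThings(JSTracer *trc, void *thing, uint32 kind)
    {
        if (kind < JSTRACE_LIMIT)
            thingCounts[kind]++;
        /*
         * A full walker would also recurse via
         * JS_TraceChildren(trc, thing, kind), keeping its own visited set to
         * break cycles; the marking tracer gets that for free from the mark
         * flags.
         */
    }

    /* Usage, with a live JSContext *cx:
     *     JSTracer trc;
     *     JS_TRACER_INIT(&trc, cx, CountThings);
     *     ...then start tracing from the roots of interest.
     */
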
+
+void
+js_CallValueTracerIfGCThing(JSTracer *trc, jsval v)
+{
+    void *thing;
+    uint32 kind;
+
+    if (JSVAL_IS_DOUBLE(v) || JSVAL_IS_STRING(v)) {
+        thing = JSVAL_TO_TRACEABLE(v);
+        kind = JSVAL_TRACE_KIND(v);
+        JS_ASSERT(kind == js_GetGCThingTraceKind(JSVAL_TO_GCTHING(v)));
+    } else if (JSVAL_IS_OBJECT(v) && v != JSVAL_NULL) {
+        /* v can be an arbitrary GC thing reinterpreted as an object. */
+        thing = JSVAL_TO_OBJECT(v);
+        kind = js_GetGCThingTraceKind(thing);
+    } else {
+        return;
+    }
+    JS_CallTracer(trc, thing, kind);
+}
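
A standalone sketch (not part of the diff) of the value tagging that the
function above relies on: low bits of a jsval-sized word say whether it
holds an object pointer, a boxed double, a string, or an inline scalar. The
tag values below are illustrative only, not SpiderMonkey's actual encoding.

    #include <stdint.h>
    #include <stdio.h>

    enum { TAG_OBJECT = 0x0, TAG_INT = 0x1, TAG_DOUBLE = 0x2, TAG_STRING = 0x4 };
    #define TAG_MASK 0x7
    #define VAL_TAG(v) ((v) & TAG_MASK)
    #define VAL_PTR(v) ((void *)((v) & ~(uintptr_t)TAG_MASK))

    static void visit_if_gc_thing(uintptr_t v) {
        switch (VAL_TAG(v)) {
          case TAG_DOUBLE:
          case TAG_STRING:
          case TAG_OBJECT:
            if (VAL_PTR(v))                 /* a null object is not a GC thing */
                printf("trace %p\n", VAL_PTR(v));
            break;
          default:
            break;                          /* ints and other scalars: nothing */
        }
    }
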
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_root_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
+                  void *arg)
+{
+    JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+    JSTracer *trc = (JSTracer *)arg;
+    jsval *rp = (jsval *)rhe->root;
+    jsval v = *rp;
+
+    /* Ignore null object and scalar values. */
+    if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
+#ifdef DEBUG
+        JSBool root_points_to_gcArenaList = JS_FALSE;
+        jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
+        JSRuntime *rt;
+        uintN i;
+        JSGCArenaList *arenaList;
+        uint32 thingSize;
+        JSGCArenaInfo *a;
+        size_t limit;
+
+        rt = trc->context->runtime;
+        for (i = 0; i < GC_NUM_FREELISTS; i++) {
+            arenaList = &rt->gcArenaList[i];
+            thingSize = arenaList->thingSize;
+            limit = (size_t) arenaList->lastCount * thingSize;
+            for (a = arenaList->last; a; a = a->prev) {
+                if (thing - ARENA_INFO_TO_START(a) < limit) {
+                    root_points_to_gcArenaList = JS_TRUE;
+                    break;
+                }
+                limit = (size_t) THINGS_PER_ARENA(thingSize) * thingSize;
+            }
+        }
+        if (!root_points_to_gcArenaList) {
+            for (a = rt->gcDoubleArenaList.first; a; a = a->prev) {
+                if (thing - ARENA_INFO_TO_START(a) <
+                    DOUBLES_PER_ARENA * sizeof(jsdouble)) {
+                    root_points_to_gcArenaList = JS_TRUE;
+                    break;
+                }
+            }
+        }
+        if (!root_points_to_gcArenaList && rhe->name) {
+            fprintf(stderr,
+"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
+"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
+"The root's name is \"%s\".\n",
+                    rhe->name);
+        }
+        JS_ASSERT(root_points_to_gcArenaList);
+#endif
+        JS_SET_TRACING_NAME(trc, rhe->name ? rhe->name : "root");
+        js_CallValueTracerIfGCThing(trc, v);
+    }
+
+    return JS_DHASH_NEXT;
+}
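
A sketch, not part of the diff: the debug-only root validation above leans
on the classic unsigned-wraparound bounds check. With unsigned words,
thing - start < limit is a single comparison that holds exactly when
start <= thing < start + limit, because a thing below start wraps around to
a huge value. A self-contained demonstration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static int in_arena(uintptr_t thing, uintptr_t start, size_t limit) {
        return thing - start < limit;   /* wraps, and so fails, if thing < start */
    }

    int main(void) {
        assert(in_arena(0x1008, 0x1000, 0x100));
        assert(!in_arena(0x0fff, 0x1000, 0x100));   /* below the arena */
        assert(!in_arena(0x1100, 0x1000, 0x100));   /* one past the end */
        return 0;
    }
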
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num,
+                  void *arg)
+{
+    JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
+    void *thing = (void *)lhe->thing;
+    JSTracer *trc = (JSTracer *)arg;
+    uint32 traceKind;
+
+    JS_ASSERT(lhe->count >= 1);
+    traceKind = js_GetGCThingTraceKind(thing);
+    JS_CALL_TRACER(trc, thing, traceKind, "locked object");
+    return JS_DHASH_NEXT;
+}
+
+#define TRACE_JSVALS(trc, len, vec, name)                                     \
+    JS_BEGIN_MACRO                                                            \
+        jsval _v, *_vp, *_end;                                                \
+                                                                              \
+        for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) {                \
+            _v = *_vp;                                                        \
+            if (JSVAL_IS_TRACEABLE(_v)) {                                     \
+                JS_SET_TRACING_INDEX(trc, name, _vp - (vec));                 \
+                JS_CallTracer(trc, JSVAL_TO_TRACEABLE(_v),                    \
+                              JSVAL_TRACE_KIND(_v));                          \
+            }                                                                 \
+        }                                                                     \
+    JS_END_MACRO
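
A sketch, not part of the diff: JS_BEGIN_MACRO/JS_END_MACRO are
SpiderMonkey's names for the usual do { ... } while (0) wrapper, which is
what lets a multi-statement macro such as TRACE_JSVALS above behave as a
single statement, for example in an unbraced if/else:

    #define MY_BEGIN_MACRO do {
    #define MY_END_MACRO   } while (0)

    #define SWAP_INT(a, b)                                                    \
        MY_BEGIN_MACRO                                                        \
            int _tmp = (a);                                                   \
            (a) = (b);                                                        \
            (b) = _tmp;                                                       \
        MY_END_MACRO

    /* Without the wrapper this would not parse as intended:
     *     if (x > y)
     *         SWAP_INT(x, y);
     *     else
     *         ...
     */
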
+
+void
+js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
+{
+    uintN nslots, minargs, skip;
+
+    if (fp->callobj)
+        JS_CALL_OBJECT_TRACER(trc, fp->callobj, "call");
+    if (fp->argsobj)
+        JS_CALL_OBJECT_TRACER(trc, fp->argsobj, "arguments");
+    if (fp->varobj)
+        JS_CALL_OBJECT_TRACER(trc, fp->varobj, "variables");
+    if (fp->script) {
+        js_TraceScript(trc, fp->script);
+        /*
+         * Don't mark what has not been pushed yet, or what has been
+         * popped already.
+         */
+        if (fp->regs) {
+            nslots = (uintN) (fp->regs->sp - fp->spbase);
+            JS_ASSERT(nslots <= fp->script->depth);
+            TRACE_JSVALS(trc, nslots, fp->spbase, "operand");
+        }
+    }
+
+    /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
+    JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
+              (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
+    JS_CALL_VALUE_TRACER(trc, (jsval)fp->thisp, "this");
+
+    if (fp->callee)
+        JS_CALL_OBJECT_TRACER(trc, fp->callee, "callee");
+
+    if (fp->argv) {
+        nslots = fp->argc;
+        skip = 0;
+        if (fp->fun) {
+            minargs = FUN_MINARGS(fp->fun);
+            if (minargs > nslots)
+                nslots = minargs;
+            if (!FUN_INTERPRETED(fp->fun)) {
+                JS_ASSERT(!(fp->fun->flags & JSFUN_FAST_NATIVE));
+                nslots += fp->fun->u.n.extra;
+            }
+            if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
+                skip = 2 + fp->argc;
+        }
+        TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
+    }
+    JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
+    if (fp->vars)
+        TRACE_JSVALS(trc, fp->nvars, fp->vars, "var");
+    if (fp->scopeChain)
+        JS_CALL_OBJECT_TRACER(trc, fp->scopeChain, "scope chain");
+    if (fp->sharpArray)
+        JS_CALL_OBJECT_TRACER(trc, fp->sharpArray, "sharp array");
+
+    if (fp->xmlNamespace)
+        JS_CALL_OBJECT_TRACER(trc, fp->xmlNamespace, "xmlNamespace");
+}
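
A sketch, not part of the diff: the argv - 2 arithmetic above assumes the
engine's frame layout in which the two value slots immediately before argv
hold the callee and |this| (which is why skip = 2 + fp->argc removes exactly
that span when argv is rooted elsewhere); the layout itself is an assumption
of this example. A mock of tracing that contiguous run, with hypothetical
values:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t jsval_t;    /* stand-in for jsval */

    int main(void) {
        /* One contiguous run of slots: [callee][this][arg0][arg1] */
        jsval_t slots[4] = { 0xca11ee, 0x7415, 100, 200 };
        jsval_t *argv = slots + 2;      /* argv points at arg0 */
        unsigned nslots = 2;            /* argc */

        for (jsval_t *vp = argv - 2; vp != argv + nslots; vp++)
            printf("trace argv[%ld] = %#lx\n",
                   (long)(vp - argv), (unsigned long)*vp);
        return 0;
    }
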
+
+static void
+TraceWeakRoots(JSTracer *trc, JSWeakRoots *wr)
+{
+    uint32 i;
+    void *thing;
+
+#ifdef DEBUG
+    static const char *weakRootNames[JSTRACE_LIMIT] = {
+        "newborn object",
+        "newborn double",
+        "newborn string",
+        "newborn namespace",
+        "newborn qname",
+        "newborn xml"
+    };
+#endif
+
+    for (i = 0; i != JSTRACE_LIMIT; i++) {
+        thing = wr->newborn[i];
+        if (thing)
+            JS_CALL_TRACER(trc, thing, i, weakRootNames[i]);
+    }
+    JS_ASSERT(i == GCX_EXTERNAL_STRING);
+    for (; i != GCX_NTYPES; ++i) {
+        thing = wr->newborn[i];
+        if (thing) {
+            JS_SET_TRACING_INDEX(trc, "newborn external string",
+                                 i - GCX_EXTERNAL_STRING);
+            JS_CallTracer(trc, thing, JSTRACE_STRING);
+        }
+    }
+
+    JS_CALL_VALUE_TRACER(trc, wr->lastAtom, "lastAtom");
+    JS_SET_TRACING_NAME(trc, "lastInternalResult");
+    js_CallValueTracerIfGCThing(trc, wr->lastInternalResult);
+}
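
A sketch, not part of the diff: the newborn entries traced above act as a
one-element weak root per GC-thing kind. Reduced to its core (the kinds and
the allocator hook are hypothetical): every allocation records itself, so
the most recently created thing of each kind survives a GC that runs before
the caller has had a chance to root it.

    typedef enum { KIND_OBJECT, KIND_DOUBLE, KIND_STRING, KIND_LIMIT } Kind;

    static void *newborn[KIND_LIMIT];   /* traced like wr->newborn[] above */

    static void *record_newborn(Kind kind, void *thing) {
        newborn[kind] = thing;          /* overwritten by the next allocation */
        return thing;
    }
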
+
+JS_FRIEND_API(void)
+js_TraceContext(JSTracer *trc, JSContext *acx)
+{
+    JSArena *a;
+    int64 age;
+    JSStackFrame *fp, *nextChain;
+    JSStackHeader *sh;
+    JSTempValueRooter *tvr;
+
+    if (IS_GC_MARKING_TRACER(trc)) {
+        /*
+         * Release stackPool here, if it has been in existence for longer than
+         * the limit specified by gcStackPoolLifespan.
+         */
+        a = acx->stackPool.current;
+        if (a == acx->stackPool.first.next &&
+            a->avail == a->base + sizeof(int64)) {
+            age = JS_Now() - *(int64 *) a->base;
+            if (age > (int64) acx->runtime->gcStackPoolLifespan * 1000)
+                JS_FinishArenaPool(&acx->stackPool);
+        }
+
+        /*
+         * Clear the double free list to release all the pre-allocated doubles.
+         */
+        acx->doubleFreeList = NULL;
+    }
+
+    /*
+     * Iterate frame chain and dormant chains.
+     *
+     * (NB: see comment on this whole "dormant" thing in js_Execute.)
+     */
+    fp = acx->fp;
+    nextChain = acx->dormantFrameChain;
+    if (!fp)
+        goto next_chain;
+
+    /* The top frame must not be dormant. */
+    JS_ASSERT(!fp->dormantNext);
+    for (;;) {
+        do {
+            js_TraceStackFrame(trc, fp);
+        } while ((fp = fp->down) != NULL);
+
+      next_chain:
+        if (!nextChain)
+            break;
+        fp = nextChain;
+        nextChain = nextChain->dormantNext;
+    }
+
+    /* Mark other roots-by-definition in acx. */
+    if (acx->globalObject)
+        JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
+    TraceWeakRoots(trc, &acx->weakRoots);
+    if (acx->throwing) {
+        JS_CALL_VALUE_TRACER(trc, acx->exception, "exception");
+    } else {
+        /* Avoid keeping GC-ed junk stored in JSContext.exception. */
+        acx->exception = JSVAL_NULL;
+    }
+#if JS_HAS_LVALUE_RETURN
+    if (acx->rval2set)
+        JS_CALL_VALUE_TRACER(trc, acx->rval2, "rval2");
+#endif
+
+    for (sh = acx->stackHeaders; sh; sh = sh->down) {
+        METER(trc->context->runtime->gcStats.stackseg++);
+        METER(trc->context->runtime->gcStats.segslots += sh->nslots);
+        TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
+    }
+
+    if (acx->localRootStack)
+        js_TraceLocalRoots(trc, acx->localRootStack);
+
+    for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
+        switch (tvr->count) {
+          case JSTVU_SINGLE:
+            JS_SET_TRACING_NAME(trc, "tvr->u.value");
+            js_CallValueTracerIfGCThing(trc, tvr->u.value);
+            break;
+          case JSTVU_TRACE:
+            tvr->u.trace(trc, tvr);
+            break;
+          case JSTVU_SPROP:
+            TRACE_SCOPE_PROPERTY(trc, tvr->u.sprop);
+            break;
+          case JSTVU_WEAK_ROOTS:
+            TraceWeakRoots(trc, tvr->u.weakRoots);
+            break;
+          case JSTVU_PARSE_CONTEXT:
+            js_TraceParseContext(trc, tvr->u.parseContext);
+            break;
+          case JSTVU_SCRIPT:
+            js_TraceScript(trc, tvr->u.script);
+            break;
+          default:
+            JS_ASSERT(tvr->count >= 0);
+            TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
+        }
+    }
+
+    if (acx->sharpObjectMap.depth > 0)
+        js_TraceSharpMap(trc, &acx->sharpObjectMap);
+}
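
A sketch, not part of the diff: the tempValueRooters walked above form an
intrusive LIFO stack of stack-allocated rooters, with the count field
doubling as a kind tag (negative JSTVU_* specials versus a non-negative
jsval array length). A minimal version of the push/pop discipline, with
hypothetical types:

    #include <assert.h>
    #include <stddef.h>

    typedef struct Rooter {
        struct Rooter *down;    /* next-older rooter, as tvr->down above */
        int            count;   /* >= 0: array length; < 0: special kind */
        void          *payload;
    } Rooter;

    static Rooter *rooters;     /* per-context list head in the real code */

    static void push_root(Rooter *r, int count, void *payload) {
        r->down = rooters;
        r->count = count;
        r->payload = payload;
        rooters = r;
    }

    static void pop_root(Rooter *r) {
        assert(rooters == r);   /* pops must mirror pushes exactly */
        rooters = r->down;
    }
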
+
+void
+js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
+{
+    JSRuntime *rt = trc->context->runtime;
+    JSContext *iter, *acx;
+
+    JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_traversal, trc);
+    if (rt->gcLocksHash)
+        JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_traversal, trc);
+    js_TraceAtomState(trc, allAtoms);
+    js_TraceNativeIteratorStates(trc);
+    js_TraceRuntimeNumberState(trc);
+
+    iter = NULL;
+    while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
+        js_TraceContext(trc, acx);
+
+    if (rt->gcExtraRootsTraceOp)
+        rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
+}
+
+static void
+ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
+{
+    JSObject *obj, *pobj;
+    uint32 slot;
+
+    obj = ssr->obj;
+    pobj = ssr->pobj;
+    slot = ssr->slot;
+
+    while (pobj) {
+        pobj = js_GetWrappedObject(cx, pobj);
+        if (pobj == obj) {
+            ssr->errnum = JSMSG_CYCLIC_VALUE;
+            return;
+        }
+        pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
+    }
+
+    pobj = ssr->pobj;
+
+    if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
+        JSScope *scope, *newscope;
+        JSObject *oldproto;
+
+        /* Check to see whether obj shares its prototype's scope. */
+        scope = OBJ_SCOPE(obj);
+        oldproto = STOBJ_GET_PROTO(obj);
+        if (oldproto && OBJ_SCOPE(oldproto) == scope) {
+            /* Either obj needs a new empty scope, or it should share pobj's. */
+            if (!pobj ||
+                !OBJ_IS_NATIVE(pobj) ||
+                OBJ_GET_CLASS(cx, pobj) != STOBJ_GET_CLASS(oldproto)) {
+                /*
+                 * With no proto and no scope of its own, obj is truly empty.
+                 *
+                 * If pobj is not native, obj needs its own empty scope -- it
+                 * should not continue to share oldproto's scope once oldproto
+                 * is not on obj's prototype chain. That would put properties
+                 * from oldproto's scope ahead of properties defined by pobj,
+                 * in lookup order.
+                 *
+                 * If pobj's class differs from oldproto's, we may need a new
+                 * scope to handle differences in private and reserved slots,
+                 * so we suboptimally but safely make one.
+                 */
+                if (!js_GetMutableScope(cx, obj)) {
+                    ssr->errnum = JSMSG_OUT_OF_MEMORY;
+                    return;
+                }
+            } else if (OBJ_SCOPE(pobj) != scope) {
+                newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
+                obj->map = &newscope->map;
+                js_DropObjectMap(cx, &scope->map, obj);
+                JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
+            }
+        }
+
+        /*
+         * Regenerate property cache shape ids for all of the scopes along the
+         * old prototype chain, in case any property cache entries were filled
+         * by looking up starting from obj.
+         */
+        while (oldproto && OBJ_IS_NATIVE(oldproto)) {
+            scope = OBJ_SCOPE(oldproto);
+            SCOPE_MAKE_UNIQUE_SHAPE(cx, scope);
+            oldproto = STOBJ_GET_PROTO(scope->object);
+        }
+    }
+
+    /* Finally, do the deed. */
+    STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
+}
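
A sketch, not part of the diff: the while loop that opens the function above
is a plain cycle check for prototype (or parent) assignment. Stripped of the
wrapper-unwrapping and slot macros, the invariant is: refuse to link obj to
pobj if walking pobj's own chain ever reaches obj.

    #include <stddef.h>

    typedef struct Obj { struct Obj *proto; } Obj;

    /* Nonzero when setting obj->proto = pobj would create a cycle. */
    static int would_cycle(const Obj *obj, const Obj *pobj) {
        while (pobj) {
            if (pobj == obj)
                return 1;
            pobj = pobj->proto;
        }
        return 0;
    }
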
+
+/*
+ * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
+ * rt->gcLock already held, so the lock should be kept on return.
+ */
+void
+js_GC(JSContext *cx, JSGCInvocationKind gckind)
+{
+    JSRuntime *rt;
+    JSBool keepAtoms;
+    JSGCCallback callback;
+    uintN i, type;
+    JSTracer trc;
+    uint32 thingSize, indexLimit;
+    JSGCArenaInfo *a, **ap, *emptyArenas;
+    uint8 flags, *flagp;
+    JSGCThing *thing, *freeList;
+    JSGCArenaList *arenaList;
+    JSBool allClear;
+#ifdef JS_THREADSAFE
+    uint32 requestDebit;
+    JSContext *acx, *iter;
+#endif
+#ifdef JS_GCMETER
+    uint32 nlivearenas, nkilledarenas, nthings;
+#endif
+
+    rt = cx->runtime;
+#ifdef JS_THREADSAFE
+    /* Avoid deadlock. */
+    JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
+#endif
+
+    if (gckind & GC_KEEP_ATOMS) {
+        /*
+         * The set slot request and last ditch GC kinds preserve all atoms and
+         * weak roots.
+         */
+        keepAtoms = JS_TRUE;
+    } else {
+        /* Keep atoms when a suspended compile is running on another context. */
+        keepAtoms = (rt->gcKeepAtoms != 0);
+        JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
+    }
+
+    /*
+     * Don't collect garbage if the runtime isn't up, and cx is not the last
+     * context in the runtime. The last context must force a GC, and nothing
+     * should suppress that final collection or there may be shutdown leaks,
+     * or runtime bloat until the next context is created.
+     */
+    if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
+        return;
+
+  restart_at_beginning:
+    /*
+     * Let the API user decide to defer a GC if it wants to (unless this
+     * is the last context). Invoke the callback regardless. Sample the
+     * callback in case we are freely racing with a JS_SetGCCallback{,RT} on
+     * another thread.
+     */
+    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
+        JSBool ok;
+
+        if (gckind & GC_LOCK_HELD)
+            JS_UNLOCK_GC(rt);
+        ok = callback(cx, JSGC_BEGIN);
+        if (gckind & GC_LOCK_HELD)
+            JS_LOCK_GC(rt);
+        if (!ok && gckind != GC_LAST_CONTEXT)
+            return;
+    }
+
+    /* Lock out other GC allocator and collector invocations. */
+    if (!(gckind & GC_LOCK_HELD))
+        JS_LOCK_GC(rt);
+
+    METER(rt->gcStats.poke++);
+    rt->gcPoke = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+    JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+
+    /* Bump gcLevel and return rather than nest on this thread. */
+    if (rt->gcThread == cx->thread) {
+        JS_ASSERT(rt->gcLevel > 0);
+        rt->gcLevel++;
+        METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
+        if (!(gckind & GC_LOCK_HELD))
+            JS_UNLOCK_GC(rt);
+        return;
+    }
+
+    /*
+     * If we're in one or more requests (possibly on more than one context)
+     * running on the current thread, indicate, temporarily, that all these
+     * requests are inactive. If cx->thread is NULL, then cx is not using
+     * the request model, and does not contribute to rt->requestCount.
+     */
+    requestDebit = 0;
+    if (cx->thread) {
+        JSCList *head, *link;
+
+        /*
+         * Check all contexts on cx->thread->contextList for active requests,
+         * counting each such context against requestDebit.
+         */
+        head = &cx->thread->contextList;
+        for (link = head->next; link != head; link = link->next) {
+            acx = CX_FROM_THREAD_LINKS(link);
+            JS_ASSERT(acx->thread == cx->thread);
+            if (acx->requestDepth)
+                requestDebit++;
+        }
+    } else {
+        /*
+         * We assert, but check anyway, in case someone is misusing the API.
+         * Avoiding the loop over all of rt's contexts is a win in the event
+         * that the GC runs only on request-less contexts with null threads,
+         * in a special thread such as might be used by the UI/DOM/Layout
+         * "mozilla" or "main" thread in Mozilla-the-browser.
+         */
+        JS_ASSERT(cx->requestDepth == 0);
+        if (cx->requestDepth)
+            requestDebit = 1;
+    }
+    if (requestDebit) {
+        JS_ASSERT(requestDebit <= rt->requestCount);
+        rt->requestCount -= requestDebit;
+        if (rt->requestCount == 0)
+            JS_NOTIFY_REQUEST_DONE(rt);
+    }
+
+    /* If another thread is already in GC, don't attempt GC; wait instead. */
+    if (rt->gcLevel > 0) {
+        /* Bump gcLevel to restart the current GC, so it finds new garbage. */
+        rt->gcLevel++;
+        METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
+
+        /* Wait for the other thread to finish, then resume our request. */
+        while (rt->gcLevel > 0)
+            JS_AWAIT_GC_DONE(rt);
+        if (requestDebit)
+            rt->requestCount += requestDebit;
+        if (!(gckind & GC_LOCK_HELD))
+            JS_UNLOCK_GC(rt);
+        return;
+    }
+
+    /* No other thread is in GC, so indicate that we're now in GC. */
+    rt->gcLevel = 1;
+    rt->gcThread = cx->thread;
+
+    /* Wait for all other requests to finish. */
+    while (rt->requestCount > 0)
+        JS_AWAIT_REQUEST_DONE(rt);
+
+#else /* !JS_THREADSAFE */
+
+    /* Bump gcLevel and return rather than nest; the outer gc will restart. */
+    rt->gcLevel++;
+    METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
+    if (rt->gcLevel > 1)
+        return;
+
+#endif /* !JS_THREADSAFE */
+
+    /*
+     * Set rt->gcRunning here within the GC lock, and after waiting for any
+     * active requests to end, so that new requests that try to JS_AddRoot,
+     * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
+     * rt->gcLevel to drop to zero, while request-less calls to the *Root*
+     * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
+     * waiting for GC to finish.
+     */
+    rt->gcRunning = JS_TRUE;
+
+    if (gckind == GC_SET_SLOT_REQUEST) {
+        JSSetSlotRequest *ssr;
+
+        while ((ssr = rt->setSlotRequests) != NULL) {
+            rt->setSlotRequests = ssr->next;
+            JS_UNLOCK_GC(rt);
+            ssr->next = NULL;
+            ProcessSetSlotRequest(cx, ssr);
+            JS_LOCK_GC(rt);
+        }
+
+        /*
+         * We assume here that killing links to parent and prototype objects
+         * does not create garbage (such objects typically are long-lived and
+         * widely shared, e.g. global objects, Function.prototype, etc.). We
+         * collect garbage only if a racing thread attempted GC and is waiting
+         * for us to finish (gcLevel > 1) or if someone already poked us.
+         */
+        if (rt->gcLevel == 1 && !rt->gcPoke)
+            goto done_running;
+
+        rt->gcLevel = 0;
+        rt->gcPoke = JS_FALSE;
+        rt->gcRunning = JS_FALSE;
+#ifdef JS_THREADSAFE
+        rt->gcThread = NULL;
+        rt->requestCount += requestDebit;
+#endif
+        gckind = GC_LOCK_HELD;
+        goto restart_at_beginning;
+    }
+
+    JS_UNLOCK_GC(rt);
+
+    /* Reset malloc counter. */
+    rt->gcMallocBytes = 0;
+
+#ifdef JS_DUMP_SCOPE_METERS
+  { extern void js_DumpScopeMeters(JSRuntime *rt);
+    js_DumpScopeMeters(rt);
+  }
+#endif
+
+    /*
+     * Clear property cache weak references and disable the cache so nothing
+     * can fill it during GC (this is paranoia, since scripts should not run
+     * during GC).
+     */
+    js_DisablePropertyCache(cx);
+    js_FlushPropertyCache(cx);
+
+#ifdef JS_THREADSAFE
+    /*
+     * Set all thread local freelists to NULL. We may visit a thread's
+     * freelist more than once. To avoid redundant clearing we unroll the
+     * current thread's step.
+     *
+     * Also, in case a JSScript wrapped within an object was finalized, we
+     * null acx->thread->gsnCache.script and finish the cache's hashtable.
+     * Note that js_DestroyScript, called from script_finalize, will have
+     * already cleared cx->thread->gsnCache above during finalization, so we
+     * don't have to here.
+     */
+    memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
+    iter = NULL;
+    while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
+        if (!acx->thread || acx->thread == cx->thread)
+            continue;
+        memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
+        GSN_CACHE_CLEAR(&acx->thread->gsnCache);
+        js_DisablePropertyCache(acx);
+        js_FlushPropertyCache(acx);
+    }
+#else
+    /* The thread-unsafe case just has to clear the runtime's GSN cache. */
+    GSN_CACHE_CLEAR(&rt->gsnCache);
+#endif
+
+  restart:
+    rt->gcNumber++;
+    JS_ASSERT(!rt->gcUntracedArenaStackTop);
+    JS_ASSERT(rt->gcTraceLaterCount == 0);
+
+    /* Reset the property cache's type id generator so we can compress ids. */
+    rt->shapeGen = 0;
+
+    /*
+     * Mark phase.
+     */
+    JS_TRACER_INIT(&trc, cx, NULL);
+    rt->gcMarkingTracer = &trc;
+    JS_ASSERT(IS_GC_MARKING_TRACER(&trc));
+
+    for (a = rt->gcDoubleArenaList.first; a; a = a->prev)
+        a->u.hasMarkedDoubles = JS_FALSE;
+
+    js_TraceRuntime(&trc, keepAtoms);
+    js_MarkScriptFilenames(rt, keepAtoms);
+
+    /*
+     * Mark children of things that caused too deep recursion during the above
+     * tracing.
+     */
+    TraceDelayedChildren(&trc);
+
+    JS_ASSERT(!cx->insideGCMarkCallback);
+    if (rt->gcCallback) {
+        cx->insideGCMarkCallback = JS_TRUE;
+        (void) rt->gcCallback(cx, JSGC_MARK_END);
+        JS_ASSERT(cx->insideGCMarkCallback);
+        cx->insideGCMarkCallback = JS_FALSE;
+    }
+    JS_ASSERT(rt->gcTraceLaterCount == 0);
+
+    rt->gcMarkingTracer = NULL;
+
+    /*
+     * Sweep phase.
+     *
+     * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
+     * so that any attempt to allocate a GC-thing from a finalizer will fail,
+     * rather than nest badly and leave the unmarked newborn to be swept.
+     *
+     * We first sweep atom state so we can use js_IsAboutToBeFinalized on
+     * JSString or jsdouble held in a hashtable to check if the hashtable
+     * entry can be freed. Note that even after the entry is freed, JSObject
+     * finalizers can continue to access the corresponding jsdouble* and
+     * JSString* assuming that they are unique. This works since the
+     * atomization API must not be called during GC.
+     */
+    js_SweepAtomState(cx);
+
+    /* Finalize iterator states before the objects they iterate over. */
+    CloseNativeIterators(cx);
+
+    /* Finalize watch points associated with unreachable objects. */
+    js_SweepWatchPoints(cx);
+
+#ifdef DEBUG
+    /* Save the pre-sweep count of scope-mapped properties. */
+    rt->liveScopePropsPreSweep = rt->liveScopeProps;
+#endif
+
+    /*
+     * Here we need to ensure that JSObject instances are finalized before GC-
+     * allocated JSString and jsdouble instances so object's finalizer can
+     * access them even if they will be freed. For that we simply finalize the
+     * list containing JSObject first since the static assert at the beginning
+     * of the file guarantees that JSString and jsdouble instances are
+     * allocated from a different list.
+     */
+    emptyArenas = NULL;
+    for (i = 0; i < GC_NUM_FREELISTS; i++) {
+        arenaList = &rt->gcArenaList[i == 0
+                                     ? GC_FREELIST_INDEX(sizeof(JSObject))
+                                     : i == GC_FREELIST_INDEX(sizeof(JSObject))
+                                     ? 0
+                                     : i];
+        ap = &arenaList->last;
+        if (!(a = *ap))
+            continue;
+
+        JS_ASSERT(arenaList->lastCount > 0);
+        arenaList->freeList = NULL;
+        freeList = NULL;
+        thingSize = arenaList->thingSize;
+        indexLimit = THINGS_PER_ARENA(thingSize);
+        flagp = THING_FLAGP(a, arenaList->lastCount - 1);
+        METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
+        for (;;) {
+            JS_ASSERT(a->prevUntracedPage == 0);
+            JS_ASSERT(a->u.untracedThings == 0);
+            allClear = JS_TRUE;
+            do {
+                flags = *flagp;
+                if (flags & (GCF_MARK | GCF_LOCK)) {
+                    *flagp &= ~GCF_MARK;
+                    allClear = JS_FALSE;
+                    METER(nthings++);
+                } else {
+                    thing = FLAGP_TO_THING(flagp, thingSize);
+                    if (!(flags & GCF_FINAL)) {
+                        /*
+                         * Call the finalizer with GCF_FINAL ORed into flags.
+                         */
+                        *flagp = (uint8)(flags | GCF_FINAL);
+                        type = flags & GCF_TYPEMASK;
+                        switch (type) {
+                          case GCX_OBJECT:
+                            js_FinalizeObject(cx, (JSObject *) thing);
+                            break;
+                          case GCX_DOUBLE:
+                            /* Do nothing. */
+                            break;
+#if JS_HAS_XML_SUPPORT
+                          case GCX_NAMESPACE:
+                            js_FinalizeXMLNamespace(cx,
+                                                    (JSXMLNamespace *) thing);
+                            break;
+                          case GCX_QNAME:
+                            js_FinalizeXMLQName(cx, (JSXMLQName *) thing);
+                            break;
+                          case GCX_XML:
+                            js_FinalizeXML(cx, (JSXML *) thing);
+                            break;
+#endif
+                          default:
+                            JS_ASSERT(type == GCX_STRING ||
+                                      type - GCX_EXTERNAL_STRING <
+                                      GCX_NTYPES - GCX_EXTERNAL_STRING);
+                            js_FinalizeStringRT(rt, (JSString *) thing,
+                                                (intN) (type -
+                                                        GCX_EXTERNAL_STRING),
+                                                cx);
+                            break;
+                        }
+#ifdef DEBUG
+                        memset(thing, JS_FREE_PATTERN, thingSize);
+#endif
+                    }
+                    thing->flagp = flagp;
+                    thing->next = freeList;
+                    freeList = thing;
+                }
+            } while (++flagp != THING_FLAGS_END(a));
+
+            if (allClear) {
+                /*
+                 * Forget just assembled free list head for the arena and
+                 * add the arena itself to the destroy list.
+                 */
+                freeList = arenaList->freeList;
+                if (a == arenaList->last)
+                    arenaList->lastCount = (uint16) indexLimit;
+                *ap = a->prev;
+                a->prev = emptyArenas;
+                emptyArenas = a;
+                METER(nkilledarenas++);
+            } else {
+                arenaList->freeList = freeList;
+                ap = &a->prev;
+                METER(nlivearenas++);
+            }
+            if (!(a = *ap))
+                break;
+            flagp = THING_FLAGP(a, indexLimit - 1);
+        }
+
+        /*
+         * We use arenaList - &rt->gcArenaList[0], not i, as the stat index
+         * due to the enumeration reorder at the beginning of the loop.
+         */
+        METER(UpdateArenaStats(&rt->gcStats.arenaStats[arenaList -
+                                                       &rt->gcArenaList[0]],
+                               nlivearenas, nkilledarenas, nthings));
+    }
+
+    ap = &rt->gcDoubleArenaList.first;
+    METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
+    while ((a = *ap) != NULL) {
+        if (!a->u.hasMarkedDoubles) {
+            /* No marked double values in the arena. */
+            *ap = a->prev;
+            a->prev = emptyArenas;
+            emptyArenas = a;
+            METER(nkilledarenas++);
+        } else {
+            ap = &a->prev;
+#ifdef JS_GCMETER
+            for (i = 0; i != DOUBLES_PER_ARENA; ++i) {
+                if (IsMarkedDouble(a, i))
+                    METER(nthings++);
+            }
+            METER(nlivearenas++);
+#endif
+        }
+    }
+    METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
+                           nlivearenas, nkilledarenas, nthings));
+    rt->gcDoubleArenaList.nextDoubleFlags =
+        rt->gcDoubleArenaList.first
+        ? DOUBLE_ARENA_BITMAP(rt->gcDoubleArenaList.first)
+        : DOUBLE_BITMAP_SENTINEL;
+
+    /*
+     * Sweep the runtime's property tree after finalizing objects, in case any
+     * had watchpoints referencing tree nodes.
+     */
+    js_SweepScopeProperties(cx);
+
+    /*
+     * Sweep script filenames after sweeping functions in the generic loop
+     * above. In this way when a scripted function's finalizer destroys the
+     * script and calls rt->destroyScriptHook, the hook can still access the
+     * script's filename. See bug 323267.
+     */
+    js_SweepScriptFilenames(rt);
+
+    /*
+     * Destroy arenas after we finished the sweeping so finalizers can safely
+     * use js_IsAboutToBeFinalized().
+     */
+    DestroyGCArenas(rt, emptyArenas);
+
+    if (rt->gcCallback)
+        (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
+#ifdef DEBUG_srcnotesize
+  { extern void DumpSrcNoteSizeHist();
+    DumpSrcNoteSizeHist();
+    printf("GC HEAP SIZE %lu\n", (unsigned long)rt->gcBytes);
+  }
+#endif
+
+#ifdef JS_SCOPE_DEPTH_METER
+  { static FILE *fp;
+    if (!fp)
+        fp = fopen("/tmp/scopedepth.stats", "w");
+
+    if (fp) {
+        JS_DumpBasicStats(&rt->protoLookupDepthStats, "proto-lookup depth", fp);
+        JS_DumpBasicStats(&rt->scopeSearchDepthStats, "scope-search depth", fp);
+        JS_DumpBasicStats(&rt->hostenvScopeDepthStats, "hostenv scope depth", fp);
+        JS_DumpBasicStats(&rt->lexicalScopeDepthStats, "lexical scope depth", fp);
+
+        putc('\n', fp);
+        fflush(fp);
+    }
+  }
+#endif /* JS_SCOPE_DEPTH_METER */
+
+    JS_LOCK_GC(rt);
+
+    /*
+     * We want to restart GC if js_GC was called recursively or if any of the
+     * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
+     */
+    if (rt->gcLevel > 1 || rt->gcPoke) {
+        rt->gcLevel = 1;
+        rt->gcPoke = JS_FALSE;
+        JS_UNLOCK_GC(rt);
+        goto restart;
+    }
+
+    if (!(rt->shapeGen & SHAPE_OVERFLOW_BIT)) {
+        js_EnablePropertyCache(cx);
+#ifdef JS_THREADSAFE
+        iter = NULL;
+        while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
+            if (!acx->thread || acx->thread == cx->thread)
+                continue;
+            js_EnablePropertyCache(acx);
+        }
+#endif
+    }
+
+    rt->gcLastBytes = rt->gcBytes;
+  done_running:
+    rt->gcLevel = 0;
+    rt->gcRunning = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+    /* If we were invoked during a request, pay back the temporary debit. */
+    if (requestDebit)
+        rt->requestCount += requestDebit;
+    rt->gcThread = NULL;
+    JS_NOTIFY_GC_DONE(rt);
+
+    /*
+     * Unlock unless we have GC_LOCK_HELD which requires locked GC on return.
+     */
+    if (!(gckind & GC_LOCK_HELD))
+        JS_UNLOCK_GC(rt);
+#endif
+
+    /*
+     * Execute JSGC_END callback outside the lock. Again, sample the callback
+     * pointer in case it changes, since we are outside of the GC vs. requests
+     * interlock mechanism here.
+     */
+    if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
+        JSWeakRoots savedWeakRoots;
+        JSTempValueRooter tvr;
+
+        if (gckind & GC_KEEP_ATOMS) {
+            /*
+             * We allow JSGC_END implementation to force a full GC or allocate
+             * new GC things. Thus we must protect the weak roots from garbage
+             * collection and overwrites.
+             */
+            savedWeakRoots = cx->weakRoots;
+            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
+            JS_KEEP_ATOMS(rt);
+            JS_UNLOCK_GC(rt);
+        }
+
+        (void) callback(cx, JSGC_END);
+
+        if (gckind & GC_KEEP_ATOMS) {
+            JS_LOCK_GC(rt);
+            JS_UNKEEP_ATOMS(rt);
+            JS_POP_TEMP_ROOT(cx, &tvr);
+        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
+            /*
+             * On shutdown iterate until JSGC_END callback stops creating
+             * garbage.
+             */
+            goto restart_at_beginning;
+        }
+    }
+}
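
A sketch, not part of the diff: the per-arena sweep loop inside js_GC above,
reduced to one page. Marked or locked things survive and have their mark bit
cleared for the next cycle; everything else is finalized at most once and
threaded onto a free list; an arena with no survivors is handed back whole.
The flag bits and types here are simplified stand-ins for the real ones.

    #include <stddef.h>

    #define MARK  0x1
    #define LOCK  0x2
    #define FINAL 0x4

    typedef struct Thing { struct Thing *next; } Thing;

    /* Returns nonzero when the whole arena is clear and can be destroyed. */
    static int sweep_arena(unsigned char *flags, Thing *things, size_t count,
                           Thing **freeList, void (*finalize)(Thing *)) {
        int allClear = 1;
        for (size_t i = 0; i < count; i++) {
            if (flags[i] & (MARK | LOCK)) {
                flags[i] &= ~MARK;          /* survivor: reset for next GC */
                allClear = 0;
            } else {
                if (!(flags[i] & FINAL)) {  /* finalize at most once */
                    flags[i] |= FINAL;
                    finalize(&things[i]);
                }
                things[i].next = *freeList; /* thread onto the free list */
                *freeList = &things[i];
            }
        }
        return allClear;
    }
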
+
+void
+js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
+{
+    uint32 *pbytes, bytes;
+
+#ifdef JS_THREADSAFE
+    pbytes = &cx->thread->gcMallocBytes;
+#else
+    pbytes = &cx->runtime->gcMallocBytes;
+#endif
+    bytes = *pbytes;
+    *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
+}
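
A sketch, not part of the diff: the update above is a saturating add.
(uint32)-1 is the maximum uint32 value, so once the counter would overflow
it sticks at the ceiling instead of wrapping to a small number, which would
defeat the "too much malloc since the last GC" heuristic that reads it.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint32_t saturating_add(uint32_t bytes, size_t nbytes) {
        return (UINT32_MAX - bytes <= nbytes) ? UINT32_MAX
                                              : bytes + (uint32_t)nbytes;
    }

    int main(void) {
        assert(saturating_add(10, 5) == 15);
        assert(saturating_add(UINT32_MAX - 1, 100) == UINT32_MAX);
        assert(saturating_add(UINT32_MAX, 1) == UINT32_MAX);
        return 0;
    }
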