[Gc] More typos fixed in GC and libatomic_ops

Ivan Maidanski ivmai at mail.ru
Sat Oct 11 04:45:59 PDT 2008


Hi!

The attached patch contains:
- lots of corrected typos in comments (only);
- a fix for the partially wrong (possibly obsolete) comment on GC_finalize(): the world is not stopped there, only the allocation lock is held.

Bye.


-------------- next part --------------
diff -ru bdwgc/alloc.c updated/bdwgc/alloc.c
--- bdwgc/alloc.c	2008-08-22 01:06:56.000000000 +0400
+++ updated/bdwgc/alloc.c	2008-10-11 13:21:55.009260500 +0400
@@ -216,7 +216,7 @@
 
 
 /* Clear up a few frames worth of garbage left at the top of the stack.	*/
-/* This is used to prevent us from accidentally treating garbade left	*/
+/* This is used to prevent us from accidentally treating garbage left	*/
 /* on the stack by other parts of the collector as roots.  This 	*/
 /* differs from the code in misc.c, which actually tries to keep the	*/
 /* stack clear of long-lived, client-generated garbage.			*/
diff -ru bdwgc/dyn_load.c updated/bdwgc/dyn_load.c
--- bdwgc/dyn_load.c	2008-10-01 18:28:50.000000000 +0400
+++ updated/bdwgc/dyn_load.c	2008-10-11 13:24:43.993635500 +0400
@@ -237,7 +237,7 @@
 /* Sort an array of HeapSects by start address.				*/
 /* Unfortunately at least some versions of				*/
 /* Linux qsort end up calling malloc by way of sysconf, and hence can't */
-/* be used in the colector.  Hence we roll our own.  Should be		*/
+/* be used in the collector.  Hence we roll our own.  Should be		*/
 /* reasonably fast if the array is already mostly sorted, as we expect	*/
 /* it to be.								*/
 static void sort_heap_sects(struct HeapSect *base, size_t number_of_elements)
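
Aside: the hand-rolled replacement is essentially an insertion sort, which
allocates nothing and is near-linear on mostly-sorted input, which is exactly
what the comment above expects.  A rough sketch of the idea (not the
collector's actual code; hs_start is the HeapSect field from gc_priv.h):

static void sort_sects_sketch(struct HeapSect *base, size_t n)
{
    size_t i;

    for (i = 1; i < n; i++) {
        struct HeapSect tmp = base[i];	/* next unsorted element */
        size_t j = i;

        /* Shift larger entries right, then drop tmp into place. */
        while (j > 0 && base[j-1].hs_start > tmp.hs_start) {
            base[j] = base[j-1];
            j--;
        }
        base[j] = tmp;
    }
}
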
@@ -1178,7 +1178,7 @@
           PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
           PCR_IL_LoadedSegment * q;
           
-          /* Skip uncommited files */
+          /* Skip uncommitted files */
           while (p != NIL && !(p -> lf_commitPoint)) {
               /* The loading of this file has not yet been committed	*/
               /* Hence its description could be inconsistent.  		*/
diff -ru bdwgc/finalize.c updated/bdwgc/finalize.c
--- bdwgc/finalize.c	2008-09-26 14:25:38.000000000 +0400
+++ updated/bdwgc/finalize.c	2008-10-11 15:04:26.737489300 +0400
@@ -81,7 +81,7 @@
 } **fo_head = 0;
 
 STATIC struct finalizable_object * GC_finalize_now = 0;
-	/* LIst of objects that should be finalized now.	*/
+	/* List of objects that should be finalized now.	*/
 
 static signed_word log_fo_table_size = -1;
 
@@ -96,7 +96,7 @@
 }
 
 /* Double the size of a hash table. *size_ptr is the log of its current	*/
-/* size.  May be a noop.						*/
+/* size.  May be a no-op.						*/
 /* *table is a pointer to an array of hash headers.  If we succeed, we	*/
 /* update both *table and *log_size_ptr.				*/
 /* Lock is held.  Signals are disabled.					*/
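
For reference, "doubling" rehashes every chain into a table twice as large,
and quietly does nothing if the new table cannot be allocated (hence "may be
a no-op").  A generic sketch of that shape, not the GC's internal code:

#include <stdlib.h>

struct entry { struct entry *next; size_t hash; };

static void grow_table_sketch(struct entry ***table, unsigned *log_size_ptr)
{
    size_t i, old_size = (size_t)1 << *log_size_ptr;
    size_t new_size = old_size << 1;
    struct entry **new_tab = calloc(new_size, sizeof *new_tab);

    if (new_tab == NULL) return;		/* the no-op case */
    for (i = 0; i < old_size; i++) {
        struct entry *e = (*table)[i], *next;

        for (; e != NULL; e = next) {		/* rehash each chain entry */
            next = e->next;
            e->next = new_tab[e->hash & (new_size - 1)];
            new_tab[e->hash & (new_size - 1)] = e;
        }
    }
    free(*table);
    *table = new_tab;
    ++*log_size_ptr;
}
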
@@ -486,8 +486,8 @@
 }
 #endif
 
-/* Called with world stopped.  Cause disappearing links to disappear,	*/
-/* and invoke finalizers.						*/
+/* Called with the lock held (but the world is running).		*/
+/* Cause disappearing links to disappear, and invoke finalizers.	*/
 void GC_finalize(void)
 {
     struct disappearing_link * curr_dl, * prev_dl, * next_dl;
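
For context, GC_finalize() is the internal pass behind the public
finalization API; clients only ever see the gc.h interface.  A minimal
usage sketch:

#include <stdio.h>
#include <gc.h>

static void note_death(void *obj, void *client_data)
{
    printf("object %p finalized (%s)\n", obj, (char *)client_data);
}

int main(void)
{
    void *obj;

    GC_INIT();
    obj = GC_MALLOC(64);
    /* Arrange for note_death(obj, "demo") once obj is unreachable. */
    GC_register_finalizer(obj, note_death, "demo", NULL, NULL);
    obj = NULL;
    GC_gcollect();	/* may enqueue and then run the finalizer */
    return 0;
}
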
Only in bdwgc/include: cord.h
Only in bdwgc/include: ec.h
diff -ru bdwgc/include/gc.h updated/bdwgc/include/gc.h
--- bdwgc/include/gc.h	2008-09-11 13:04:06.000000000 +0400
+++ updated/bdwgc/include/gc.h	2008-10-11 13:04:58.774885500 +0400
@@ -129,7 +129,7 @@
 
 GC_API int GC_java_finalization;
 			/* Mark objects reachable from finalizable 	*/
-			/* objects in a separate postpass.  This makes	*/
+			/* objects in a separate post-pass.  This makes	*/
 			/* it a bit safer to use non-topologically-	*/
 			/* ordered finalization.  Default value is	*/
 			/* determined by JAVA_FINALIZATION macro.	*/
@@ -164,7 +164,7 @@
 GC_API int GC_set_dont_expand(int);
 
 GC_API int GC_use_entire_heap;
-		/* Causes the nonincremental collector to use the	*/
+		/* Causes the non-incremental collector to use the	*/
 		/* entire heap before collecting.  This was the only 	*/
 		/* option for GC versions < 5.0.  This sometimes	*/
 		/* results in more large block fragmentation, since	*/
@@ -265,7 +265,7 @@
 /*
  * general purpose allocation routines, with roughly malloc calling conv.
  * The atomic versions promise that no relevant pointers are contained
- * in the object.  The nonatomic versions guarantee that the new object
+ * in the object.  The non-atomic versions guarantee that the new object
  * is cleared.  GC_malloc_stubborn promises that no changes to the object
  * will occur after GC_end_stubborn_change has been called on the
  * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
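
For anyone unfamiliar with the atomic/non-atomic split described above,
a small usage sketch:

#include <gc.h>

void alloc_examples(void)
{
    /* May contain pointers; the returned memory is cleared.	*/
    void **cell = GC_MALLOC(2 * sizeof(void *));

    /* Promised pointer-free, so the collector never scans it;	*/
    /* note that the contents are NOT cleared.			*/
    char *buf = GC_MALLOC_ATOMIC(1024);

    cell[0] = buf;	/* pointer stores into cell are traced */
}
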
@@ -301,9 +301,9 @@
  * The collector is implicitly informed of coming change when such
  * an object is first allocated.  The following routines inform the
  * collector that an object will no longer be changed, or that it will
- * once again be changed.  Only nonNIL pointer stores into the object
+ * once again be changed.  Only non-NULL pointer stores into the object
  * are considered to be changes.  The argument to GC_end_stubborn_change
- * must be exacly the value returned by GC_malloc_stubborn or passed to
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
  * GC_change_stubborn.  (In the second case it may be an interior pointer
  * within 512 bytes of the beginning of the objects.)
  * There is a performance penalty for allowing more than
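
And the stubborn protocol in use, sketched below; when the collector is
built without STUBBORN_ALLOC these calls just degenerate to ordinary
allocation:

#include <gc.h>

struct node { struct node *left, *right; };

struct node *make_leaf(void)
{
    struct node *p = GC_MALLOC_STUBBORN(sizeof(struct node));

    p -> left = NULL;			/* implicitly changeable right	*/
    p -> right = NULL;			/* after allocation		*/
    GC_END_STUBBORN_CHANGE(p);		/* promise: no more ptr stores	*/
    return p;
}

/* To mutate it again later:						*/
/*   GC_change_stubborn(p); p -> left = q; GC_end_stubborn_change(p);	*/
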
@@ -420,7 +420,7 @@
 /* ineffective.								*/
 GC_API void GC_disable(void);
 
-/* Reenable garbage collection.  GC_disable() and GC_enable() calls 	*/
+/* Re-enable garbage collection.  GC_disable() and GC_enable() calls 	*/
-/* nest.  Garbage collection is enabled if the number of calls to both	*/
-/* both functions is equal.						*/
+/* nest.  Garbage collection is enabled if the number of calls to	*/
+/* both functions is equal.						*/
 GC_API void GC_enable(void);
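
The nesting rule in practice:

#include <gc.h>

void no_gc_section(void)
{
    GC_disable();	/* depth 1: collections inhibited	*/
    GC_disable();	/* depth 2: the calls nest		*/
    /* ... code that must not trigger a collection ... */
    GC_enable();	/* depth 1: still inhibited		*/
    GC_enable();	/* depth 0: collections permitted again	*/
}
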
@@ -428,7 +428,7 @@
 /* Enable incremental/generational collection.	*/
 /* Not advisable unless dirty bits are 		*/
 /* available or most heap objects are		*/
-/* pointerfree(atomic) or immutable.		*/
+/* pointer-free (atomic) or immutable.		*/
 /* Don't use in leak finding mode.		*/
 /* Ignored if GC_dont_gc is true.		*/
 /* Only the generational piece of this is	*/
@@ -627,7 +627,7 @@
 #   define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
 # endif
 /* The following are included because they are often convenient, and	*/
-/* reduce the chance for a misspecifed size argument.  But calls may	*/
+/* reduce the chance for a misspecified size argument.  But calls may	*/
 /* expand to something syntactically incorrect if t is a complicated	*/
 /* type expression.  							*/
 # define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
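
Typical use of these convenience macros:

#include <gc.h>

struct point { double x, y; };

void new_examples(void)
{
    struct point *p = GC_NEW(struct point);	/* collectable, cleared	*/
    double *d = GC_NEW_ATOMIC(double);		/* pointer-free variant	*/

    *d = p -> x;
}
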
@@ -677,7 +677,7 @@
 	/* a signal, the object may be left with no		*/
 	/* finalization, even if neither the old nor new	*/
 	/* finalizer were NULL.					*/
-	/* Obj should be the nonNULL starting address of an 	*/
+	/* Obj should be the non-NULL starting address of an 	*/
 	/* object allocated by GC_malloc or friends.		*/
 	/* Note that any garbage collectable object referenced	*/
 	/* by cd will be considered accessible until the	*/
@@ -781,7 +781,7 @@
 	/* cleared.						*/
 	/* This can be used to implement certain types of	*/
 	/* weak pointers.  Note however that this generally	*/
-	/* requires that thje allocation lock is held (see	*/
+	/* requires that the allocation lock is held (see	*/
-	/* GC_call_with_allock_lock() below) when the disguised	*/
+	/* GC_call_with_alloc_lock() below) when the disguised	*/
 	/* pointer is accessed.  Otherwise a strong pointer	*/
 	/* could be recreated between the time the collector    */
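
The weak-pointer idiom this comment describes, sketched with the public
gc.h primitives; the reveal must happen under the allocation lock, hence
GC_call_with_alloc_lock:

#include <gc.h>

static GC_hidden_pointer weak_ref;	/* disguised, so not a root */

static void *reveal(void *data)
{
    /* Runs with the allocation lock held, so the collector cannot	*/
    /* clear the link between our test and the reveal.			*/
    GC_hidden_pointer h = *(GC_hidden_pointer *)data;
    return h != 0 ? GC_REVEAL_POINTER(h) : NULL;
}

void make_weak(void *obj)
{
    weak_ref = GC_HIDE_POINTER(obj);
    GC_general_register_disappearing_link((void **)&weak_ref, obj);
}

void *get_weak(void)	/* returns a strong pointer, or NULL */
{
    return GC_call_with_alloc_lock(reveal, &weak_ref);
}
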
@@ -979,14 +979,14 @@
 	/* We can't do this right without typeof, which ANSI	*/
 	/* decided was not sufficiently useful.  Without it	*/
 	/* we resort to the non-debug version.			*/
-	/* FIXME: This should eventially support C++0x decltype */
+	/* FIXME: This should eventually support C++0x decltype */
 #   define GC_PTR_ADD(x, n) ((x)+(n))
 #   define GC_PRE_INCR(x, n) ((x) += (n))
 #   define GC_POST_INCR(x) ((x)++)
 #   define GC_POST_DECR(x) ((x)--)
 #endif
 
-/* Safer assignment of a pointer to a nonstack location.	*/
+/* Safer assignment of a pointer to a non-stack location.	*/
 #ifdef GC_DEBUG
 #   define GC_PTR_STORE(p, q) \
 	(*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
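
A sketch of the debug-checked arithmetic and store in use:

#include <gc.h>

struct cell { struct cell *next; int v; };

void checked_stores(void)
{
    struct cell *a = GC_NEW(struct cell);
    struct cell *b = GC_NEW(struct cell);
    int *arr = GC_MALLOC(10 * sizeof(int));
    int *mid = GC_PTR_ADD(arr, 5);  /* checked in GC_DEBUG builds	*/
                                    /* (needs typeof; see above)	*/
    *mid = 42;
    /* Under GC_DEBUG, checks that the target is visible to the	*/
    /* collector and b is a valid object address; otherwise this	*/
    /* is a plain store.						*/
    GC_PTR_STORE(&(a -> next), b);
}
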
diff -ru bdwgc/include/gc_allocator.h updated/bdwgc/include/gc_allocator.h
--- bdwgc/include/gc_allocator.h	2006-04-13 04:33:04.000000000 +0400
+++ updated/bdwgc/include/gc_allocator.h	2008-10-11 12:23:22.372578500 +0400
@@ -26,7 +26,7 @@
  * This implements standard-conforming allocators that interact with
  * the garbage collector.  Gc_alloctor<T> allocates garbage-collectable
  * objects of type T.  Traceable_allocator<T> allocates objects that
- * are not temselves garbage collected, but are scanned by the
+ * are not themselves garbage collected, but are scanned by the
  * collector for pointers to collectable objects.  Traceable_alloc
  * should be used for explicitly managed STL containers that may
  * point to collectable objects.
@@ -49,7 +49,7 @@
 #endif
 
 /* First some helpers to allow us to dispatch on whether or not a type
- * is known to be pointerfree.
+ * is known to be pointer-free.
  * These are private, except that the client may invoke the
  * GC_DECLARE_PTRFREE macro.
  */
@@ -79,8 +79,8 @@
 GC_DECLARE_PTRFREE(long double);
 /* The client may want to add others.	*/
 
-// In the following GC_Tp is GC_true_type iff we are allocating a
-// pointerfree object.
+// In the following GC_Tp is GC_true_type if we are allocating a
+// pointer-free object.
 template <class GC_Tp>
 inline void * GC_selective_alloc(size_t n, GC_Tp) {
     return GC_MALLOC(n);
diff -ru bdwgc/include/gc_config_macros.h updated/bdwgc/include/gc_config_macros.h
--- bdwgc/include/gc_config_macros.h	2008-09-28 15:21:58.000000000 +0400
+++ updated/bdwgc/include/gc_config_macros.h	2008-10-11 11:13:54.000000000 +0400
@@ -141,7 +141,7 @@
       /* as well?						    */
 #   endif
 # else /* ! _WIN32_WCE */
-/* Yet more kluges for WinCE */
+/* Yet more kludges for WinCE */
 #   include <stdlib.h>		/* size_t is defined here */
     typedef long ptrdiff_t;	/* ptrdiff_t is not defined */
 # endif
diff -ru bdwgc/include/gc_mark.h updated/bdwgc/include/gc_mark.h
--- bdwgc/include/gc_mark.h	2008-08-22 01:06:56.000000000 +0400
+++ updated/bdwgc/include/gc_mark.h	2008-10-11 13:04:49.571760500 +0400
@@ -32,7 +32,7 @@
 /* A client supplied mark procedure.  Returns new mark stack pointer.	*/
 /* Primary effect should be to push new entries on the mark stack.	*/
 /* Mark stack pointer values are passed and returned explicitly.	*/
-/* Global variables decribing mark stack are not necessarily valid.	*/
+/* Global variables describing mark stack are not necessarily valid.	*/
 /* (This usually saves a few cycles by keeping things in registers.)	*/
 /* Assumed to scan about GC_PROC_BYTES on average.  If it needs to do	*/
 /* much more work than that, it should do it in smaller pieces by	*/
@@ -74,7 +74,7 @@
 #define GC_DS_LENGTH 0	/* The entire word is a length in bytes that	*/
 			/* must be a multiple of 4.			*/
 #define GC_DS_BITMAP 1	/* 30 (62) bits are a bitmap describing pointer	*/
-			/* fields.  The msb is 1 iff the first word	*/
+			/* fields.  The msb is 1 if the first word	*/
 			/* is a pointer.				*/
 			/* (This unconventional ordering sometimes	*/
 			/* makes the marker slightly faster.)		*/
@@ -99,7 +99,7 @@
 			/* object contains a type descriptor in the	*/
 			/* first word.					*/
 			/* Note that in multithreaded environments	*/
-			/* per object descriptors maust be located in	*/
+			/* per object descriptors must be located in	*/
 			/* either the first two or last two words of	*/
 			/* the object, since only those are guaranteed	*/
 			/* to be cleared while the allocation lock is	*/
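
So, for example, a two-word object whose first word is its only pointer
field could be described with the bitmap encoding like this (illustrative
only; the bitmap sits in the high bits, msb = first word, low two bits
are the tag):

#include <gc.h>
#include <gc_mark.h>

GC_word one_ptr_descr =
	((GC_word)1 << (8 * sizeof(GC_word) - 1)) | GC_DS_BITMAP;
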
diff -ru bdwgc/include/gc_typed.h updated/bdwgc/include/gc_typed.h
--- bdwgc/include/gc_typed.h	2005-10-11 02:33:34.000000000 +0400
+++ updated/bdwgc/include/gc_typed.h	2008-10-11 11:51:32.000000000 +0400
@@ -87,7 +87,7 @@
   				         GC_descr d);
   	/* Allocate an array of nelements elements, each of the	*/
   	/* given size, and with the given descriptor.		*/
-  	/* The elemnt size must be a multiple of the byte	*/
+  	/* The element size must be a multiple of the byte	*/
   	/* alignment required for pointers.  E.g. on a 32-bit	*/
   	/* machine with 16-bit aligned pointers, size_in_bytes	*/
   	/* must be a multiple of 2.				*/
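
The typed-allocation interface in use, per the documented macros in
gc_typed.h:

#include <gc.h>
#include <gc_typed.h>

struct node {
    GC_word value;	/* not a pointer		*/
    struct node *next;	/* the only pointer field	*/
};

void typed_example(void)
{
    GC_word bitmap[GC_BITMAP_SIZE(struct node)] = { 0 };
    GC_descr d;
    struct node *arr;

    GC_set_bit(bitmap, GC_WORD_OFFSET(struct node, next));
    d = GC_make_descriptor(bitmap, GC_WORD_LEN(struct node));
    /* sizeof(struct node) is a multiple of the pointer alignment,	*/
    /* as the comment above requires.					*/
    arr = GC_calloc_explicitly_typed(16, sizeof(struct node), d);
    arr[0].next = &arr[1];
}
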
Only in bdwgc/include: include.am
diff -ru bdwgc/include/javaxfc.h updated/bdwgc/include/javaxfc.h
--- bdwgc/include/javaxfc.h	2008-07-26 04:51:34.000000000 +0400
+++ updated/bdwgc/include/javaxfc.h	2008-10-11 11:52:52.000000000 +0400
@@ -11,7 +11,7 @@
  * This is needed for strict compliance with the Java standard, 
  * which can make the runtime guarantee that all finalizers are run.
  * This is problematic for several reasons:
- * 1) It means that finalizers, and all methods calle by them,
+ * 1) It means that finalizers, and all methods called by them,
  *    must be prepared to deal with objects that have been finalized in
  *    spite of the fact that they are still referenced by statically
  *    allocated pointer variables.
diff -ru bdwgc/include/new_gc_alloc.h updated/bdwgc/include/new_gc_alloc.h
--- bdwgc/include/new_gc_alloc.h	2006-12-06 22:16:14.000000000 +0300
+++ updated/bdwgc/include/new_gc_alloc.h	2008-10-11 13:38:22.540510500 +0400
@@ -133,7 +133,7 @@
   // real one must be updated with a procedure call.
   static size_t GC_bytes_recently_allocd;
 
-  // Same for uncollectable mmory.  Not yet reflected in either
+  // Same for uncollectable memory.  Not yet reflected in either
   // GC_bytes_recently_allocd or GC_non_gc_bytes.
   static size_t GC_uncollectable_bytes_recently_allocd;
 
@@ -183,7 +183,7 @@
 
 // A fast, single-threaded, garbage-collected allocator
 // We assume the first word will be immediately overwritten.
-// In this version, deallocation is not a noop, and explicit
+// In this version, deallocation is not a no-op, and explicit
 // deallocation is likely to help performance.
 template <int dummy>
 class single_client_gc_alloc_template {
@@ -347,7 +347,7 @@
 typedef traceable_alloc_template < 0 > traceable_alloc;
 
 // We want to specialize simple_alloc so that it does the right thing
-// for all pointerfree types.  At the moment there is no portable way to
+// for all pointer-free types.  At the moment there is no portable way to
 // even approximate that.  The following approximation should work for
 // SGI compilers, and recent versions of g++.
 
Only in bdwgc/include/private: cord_pos.h
diff -ru bdwgc/include/private/dbg_mlc.h updated/bdwgc/include/private/dbg_mlc.h
--- bdwgc/include/private/dbg_mlc.h	2008-09-26 14:05:32.000000000 +0400
+++ updated/bdwgc/include/private/dbg_mlc.h	2008-10-11 11:16:34.000000000 +0400
@@ -55,7 +55,7 @@
 #	define MARKED_FOR_FINALIZATION ((ptr_t)(word)2)
 	    /* Object was marked because it is finalizable.	*/
 #	define MARKED_FROM_REGISTER ((ptr_t)(word)4)
-	    /* Object was marked from a rgister.  Hence the	*/
+	    /* Object was marked from a register.  Hence the	*/
 	    /* source of the reference doesn't have an address.	*/
 # endif /* KEEP_BACK_PTRS || PRINT_BLACK_LIST */
 
@@ -110,7 +110,7 @@
       word oh_sf;			/* start flag */
 #   endif /* SHORT_DBG_HDRS */
 } oh;
-/* The size of the above structure is assumed not to dealign things,	*/
+/* The size of the above structure is assumed not to de-align things,	*/
 /* and to be a multiple of the word length.				*/
 
 #ifdef SHORT_DBG_HDRS
diff -ru bdwgc/include/private/gc_locks.h updated/bdwgc/include/private/gc_locks.h
--- bdwgc/include/private/gc_locks.h	2008-02-16 09:07:02.000000000 +0300
+++ updated/bdwgc/include/private/gc_locks.h	2008-10-11 14:25:01.371396000 +0400
@@ -187,7 +187,7 @@
 #   define I_HOLD_LOCK() TRUE
 #   define I_DONT_HOLD_LOCK() TRUE
        		/* Used only in positive assertions or to test whether	*/
-       		/* we still need to acaquire the lock.	TRUE works in	*/
+       		/* we still need to acquire the lock.  TRUE works in	*/
        		/* either case.						*/
 # endif /* !THREADS */
 
diff -ru bdwgc/include/private/gc_pmark.h updated/bdwgc/include/private/gc_pmark.h
--- bdwgc/include/private/gc_pmark.h	2008-09-25 04:51:24.000000000 +0400
+++ updated/bdwgc/include/private/gc_pmark.h	2008-10-11 13:01:06.446760500 +0400
@@ -104,7 +104,7 @@
      * also less performant, way.
      */
     void GC_do_parallel_mark();
-		/* inititate parallel marking.	*/
+		/* initiate parallel marking.	*/
 
     extern GC_bool GC_help_wanted;	/* Protected by mark lock	*/
     extern unsigned GC_helper_count;	/* Number of running helpers.	*/
@@ -242,7 +242,7 @@
 #endif
 /* If the mark bit corresponding to current is not set, set it, and 	*/
 /* push the contents of the object on the mark stack.  Current points	*/
-/* to the bginning of the object.  We rely on the fact that the 	*/
+/* to the beginning of the object.  We rely on the fact that the 	*/
 /* preceding header calculation will succeed for a pointer past the 	*/
 /* first page of an object, only if it is in fact a valid pointer	*/
 /* to the object.  Thus we can omit the otherwise necessary tests	*/
@@ -431,7 +431,7 @@
  * real_ptr. That is the job of the caller, if appropriate.
  * Note that this is called with the mutator running, but
  * with us holding the allocation lock.  This is safe only if the
- * mutator needs tha allocation lock to reveal hidden pointers.
+ * mutator needs the allocation lock to reveal hidden pointers.
  * FIXME: Why do we need the GC_mark_state test below?
  */
 # define GC_MARK_FO(real_ptr, mark_proc) \
diff -ru bdwgc/include/private/gc_priv.h updated/bdwgc/include/private/gc_priv.h
--- bdwgc/include/private/gc_priv.h	2008-09-26 15:32:02.000000000 +0400
+++ updated/bdwgc/include/private/gc_priv.h	2008-10-11 12:40:16.414552000 +0400
@@ -123,7 +123,7 @@
 /*********************************/
 
 /* #define STUBBORN_ALLOC */
-		    /* Enable stubborm allocation, and thus a limited	*/
+		    /* Enable stubborn allocation, and thus a limited	*/
 		    /* form of incremental collection w/o dirty bits.	*/
 
 /* #define ALL_INTERIOR_POINTERS */
@@ -142,7 +142,7 @@
 		    /* 2. This option makes it hard for the collector	*/
 		    /*    to allocate space that is not ``pointed to''  */
 		    /*    by integers, etc.  Under SunOS 4.X with a 	*/
-		    /*    statically linked libc, we empiricaly		*/
+		    /*    statically linked libc, we empirically	*/
 		    /*    observed that it would be difficult to 	*/
 		    /*	  allocate individual objects larger than 100K.	*/
 		    /* 	  Even if only smaller objects are allocated,	*/
@@ -1021,7 +1021,7 @@
 # endif
 # ifdef MSWINCE
     word _heap_lengths[MAX_HEAP_SECTS];
-    		/* Commited lengths of memory regions obtained from kernel. */
+    		/* Committed lengths of memory regions obtained from kernel. */
 # endif
   struct roots _static_roots[MAX_ROOT_SETS];
 # if !defined(MSWIN32) && !defined(MSWINCE)
@@ -1342,7 +1342,7 @@
 void GC_initiate_gc(void);
 				/* initiate collection.			*/
   				/* If the mark state is invalid, this	*/
-  				/* becomes full colleection.  Otherwise */
+  				/* becomes full collection.  Otherwise	*/
   				/* it's partial.			*/
 void GC_push_all(ptr_t bottom, ptr_t top);
 				/* Push everything in a range 		*/
@@ -1397,9 +1397,9 @@
   			/* Push system or application specific roots	*/
   			/* onto the mark stack.  In some environments	*/
   			/* (e.g. threads environments) this is		*/
-  			/* predfined to be non-zero.  A client supplied */
-  			/* replacement should also call the original	*/
-  			/* function.					*/
+  			/* predefined to be non-zero.  A client		*/
+  			/* supplied replacement should also call the	*/
+  			/* original function.				*/
 extern void GC_push_gc_structures(void);
 			/* Push GC internal roots.  These are normally	*/
 			/* included in the static data segment, and 	*/
@@ -1875,7 +1875,7 @@
   			/* Could the page contain valid heap pointers?	*/
 void GC_remove_protection(struct hblk *h, word nblocks,
 			  GC_bool pointerfree);
-  			/* h is about to be writteni or allocated.  Ensure  */
+  			/* h is about to be written or allocated.  Ensure   */
 			/* that it's not write protected by the virtual	    */
 			/* dirty bit implementation.			    */
 			
diff -ru bdwgc/include/private/gcconfig.h updated/bdwgc/include/private/gcconfig.h
--- bdwgc/include/private/gcconfig.h	2008-09-27 00:08:28.000000000 +0400
+++ updated/bdwgc/include/private/gcconfig.h	2008-10-11 12:58:22.478010500 +0400
@@ -385,7 +385,7 @@
 #   if defined(_MSC_VER) && defined(_M_IA64)
 #     define IA64
 #     define MSWIN32	/* Really win64, but we don't treat 64-bit 	*/
-			/* variants as a differnt platform.		*/
+			/* variants as a different platform.		*/
 #   endif
 # endif
 # if defined(__DJGPP__)
@@ -516,7 +516,7 @@
  * For each architecture and OS, the following need to be defined:
  *
  * CPP_WORDSZ is a simple integer constant representing the word size.
- * in bits.  We assume byte addressibility, where a byte has 8 bits.
+ * in bits.  We assume byte addressability, where a byte has 8 bits.
  * We also assume CPP_WORDSZ is either 32 or 64.
  * (We care about the length of pointers, not hardware
  * bus widths.  Thus a 64 bit processor with a C compiler that uses
@@ -568,7 +568,7 @@
  * HEURISTIC2:  Take an address inside GC_init's frame, increment it repeatedly
  *		in small steps (decrement if STACK_GROWS_UP), and read the value
  *		at each location.  Remember the value when the first
- *		Segmentation violation or Bus error is signalled.  Round that
+ *		Segmentation violation or Bus error is signaled.  Round that
  *		to the nearest plausible page boundary, and use that instead
  *		of STACKBOTTOM.
  *
@@ -1141,7 +1141,7 @@
 #	  define PREFETCH(x) \
 	    __asm__ __volatile__ ("	prefetchnta	%0": : "m"(*(char *)(x)))
 	    /* Empirically prefetcht0 is much more effective at reducing	*/
-	    /* cache miss stalls for the targetted load instructions.  But it	*/
+	    /* cache miss stalls for the targeted load instructions.  But it	*/
 	    /* seems to interfere enough with other cache traffic that the net	*/
 	    /* result is worse than prefetchnta.				*/
 #         if 0 
@@ -1256,7 +1256,7 @@
       extern char _end;
       extern char *_STACKTOP;
       /* Depending on calling conventions Watcom C either precedes
-         or does not precedes with undescore names of C-variables.
+         or does not precede names of C-variables with an underscore.
          Make sure startup code variables always have the same names.  */
       #pragma aux __nullarea "*";
       #pragma aux _end "*";
@@ -1528,7 +1528,7 @@
 	/* initialization.						   */
 #	define STACKBOTTOM ((ptr_t)(((word)(environ) | (getpagesize()-1))+1))
 /* #   	define HEURISTIC2 */
-	/* Normally HEURISTIC2 is too conervative, since		*/
+	/* Normally HEURISTIC2 is too conservative, since		*/
 	/* the text segment immediately follows the stack.		*/
-	/* Hence we give an upper pound.				*/
+	/* Hence we give an upper bound.				*/
 	/* This is currently unused, since we disabled HEURISTIC2	*/
@@ -2307,7 +2307,7 @@
 	/* How to get heap memory from the OS:				*/
 	/* Note that sbrk()-like allocation is preferred, since it 	*/
 	/* usually makes it possible to merge consecutively allocated	*/
-	/* chunks.  It also avoids unintented recursion with		*/
+	/* chunks.  It also avoids unintended recursion with		*/
 	/* -DREDIRECT_MALLOC.						*/
 	/* GET_MEM() returns a HLKSIZE aligned chunk.			*/
 	/* 0 is taken to mean failure. 					*/
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/aligned_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/aligned_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/aligned_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/aligned_atomic_load_store.h	2008-10-11 14:04:29.683896000 +0400
@@ -21,7 +21,7 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of AO_t are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of AO_t are
+ * atomic for all legal alignments.
  */
 
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h	2006-07-12 03:29:30.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_aligned_atomic_load_store.h	2008-10-11 14:04:57.105771000 +0400
@@ -22,7 +22,8 @@
 
 /*
  * Describes architectures on which AO_t, unsigned char, unsigned short,
- * and unsigned int loads and strores are atomic for all normally legal alignments.
+ * and unsigned int loads and stores are atomic for all normally legal
+ * alignments.
  */
 #include "aligned_atomic_load_store.h"
 #include "char_atomic_load_store.h"
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_atomic_load_store.h	2006-07-12 03:29:30.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/all_atomic_load_store.h	2008-10-11 14:05:10.121396000 +0400
@@ -22,7 +22,7 @@
 
 /*
  * Describes architectures on which AO_t, unsigned char, unsigned short,
- * and unsigned int loads and strores are atomic for all normally legal
+ * and unsigned int loads and stores are atomic for all normally legal
  * alignments.
  */
 #include "atomic_load_store.h"
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/atomic_load_store.h	2008-10-11 14:05:31.168271000 +0400
@@ -21,8 +21,8 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of AO_t are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of AO_t are
+ * atomic for all legal alignments.
  */
 
 AO_INLINE AO_t
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/char_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/char_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/char_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/char_atomic_load_store.h	2008-10-11 14:05:57.277646000 +0400
@@ -21,7 +21,7 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of unsigned char are
+ * Definitions for architectures on which loads and stores of unsigned char are
  * atomic for all legal alignments.
  */
 
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/arm.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/arm.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/arm.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/arm.h	2008-10-11 13:59:09.152646000 +0400
@@ -236,7 +236,7 @@
   /* to be stored.  Both registers must be different from addr.	*/
   /* Make the address operand an early clobber output so it     */
   /* doesn't overlap with the other operands.  The early clobber*/
-  /* on oldval is neccessary to prevent the compiler allocating */
+  /* on oldval is necessary to prevent the compiler allocating  */
   /* them to the same register if they are both unused.  	*/
   __asm__ __volatile__("swp %0, %2, [%3]"
                         : "=&r"(oldval), "=&r"(addr)
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86.h	2008-02-12 03:18:54.000000000 +0300
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86.h	2008-10-11 14:01:33.043271000 +0400
@@ -143,7 +143,7 @@
   char result;
   #if __PIC__
   /* If PIC is turned on, we can't use %ebx as it is reserved for the
-     GOT poiner.  We can save and restore %ebx because GCC won't be
+     GOT pointer.  We can save and restore %ebx because GCC won't be
      using it for anything else (such as any of the m operands) */
   __asm__ __volatile__("pushl %%ebx;"   /* save ebx used for PIC GOT ptr */
 		       "movl %6,%%ebx;" /* move new_val2 to %ebx */
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86_64.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86_64.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86_64.h	2008-01-07 08:11:52.000000000 +0300
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/gcc/x86_64.h	2008-10-11 14:02:00.277646000 +0400
@@ -174,7 +174,7 @@
 #else
 /* this one provides spinlock based emulation of CAS implemented in	*/
 /* atomic_ops.c.  We probably do not want to do this here, since it is  */
-/* not attomic with respect to other kinds of updates of *addr.  On the */
+/* not atomic with respect to other kinds of updates of *addr.  On the  */
 /* other hand, this may be a useful facility on occasion.  		*/
 #ifdef AO_WEAK_DOUBLE_CAS_EMULATION
 int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/hppa.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/hppa.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/hppa.h	2006-07-12 03:29:32.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/hppa.h	2008-10-11 14:02:20.433896000 +0400
@@ -19,7 +19,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE. 
  * 
- * Derived from the corresponsing header file for gcc.
+ * Derived from the corresponding header file for gcc.
  * 
  */ 
 
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/ia64.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/ia64.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/ia64.h	2006-07-12 03:29:32.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/hpc/ia64.h	2008-10-11 14:02:41.262021000 +0400
@@ -22,7 +22,7 @@
 
 /*
- * This file specifies Itanimum primitives for use with the HP compiler
- * unde HP/UX.  We use intrinsics instead of the inline assembly code in the
+ * This file specifies Itanium primitives for use with the HP compiler
+ * under HP/UX.  We use intrinsics instead of the inline assembly code in the
  * gcc file.
  */
 
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h	2008-10-11 14:06:38.980771000 +0400
@@ -21,8 +21,8 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of unsigned int are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned int are
+ * atomic for all legal alignments.
  */
 
 AO_INLINE unsigned int
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/int_atomic_load_store.h	2008-10-11 14:06:52.480771000 +0400
@@ -21,7 +21,7 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of unsigned int are
+ * Definitions for architectures on which loads and stores of unsigned int are
  * atomic for all legal alignments.
  */
 
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h	2008-10-11 14:07:41.512021000 +0400
@@ -21,8 +21,8 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of unsigned short are
- * atomic fo all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned short
+ * are atomic for all legal alignments.
  */
 
 AO_INLINE unsigned short
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_atomic_load_store.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_atomic_load_store.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_atomic_load_store.h	2008-07-19 02:42:22.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops/sysdeps/short_atomic_load_store.h	2008-10-11 14:07:58.762021000 +0400
@@ -21,8 +21,8 @@
  */ 
 
 /*
- * Definitions for architecturs on which loads and stores of unsigned short are
- * atomic for all legal alignments.
+ * Definitions for architectures on which loads and stores of unsigned short
+ * are atomic for all legal alignments.
  */
 
 AO_INLINE unsigned short
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops.c updated/bdwgc/libatomic_ops-1.2/src/atomic_ops.c
--- bdwgc/libatomic_ops-1.2/src/atomic_ops.c	2006-07-12 03:29:30.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops.c	2008-10-11 13:51:55.496396000 +0400
@@ -59,12 +59,12 @@
  * Out of line compare-and-swap emulation based on test and set.
  * 
  * We use a small table of locks for different compare_and_swap locations.
- * Before we update perform a compare-and-swap, we grap the corresponding
+ * Before we perform a compare-and-swap, we grab the corresponding
  * lock.  Different locations may hash to the same lock, but since we
  * never acquire more than one lock at a time, this can't deadlock.
  * We explicitly disable signals while we perform this operation.
  *
- * FIXME: We should probably also suppport emulation based on Lamport
+ * FIXME: We should probably also support emulation based on Lamport
  * locks, since we may not have test_and_set either.
  */
 #define AO_HASH_SIZE 16
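
The scheme in miniature (a sketch only, not the library's code, and
omitting the signal blocking mentioned above; lock granularity and hash
are arbitrary):

#include "atomic_ops.h"

#define MY_HASH_SIZE 16
#define MY_HASH(a) (((AO_t)(a) >> 12) & (MY_HASH_SIZE - 1))

static volatile AO_TS_t cas_locks[MY_HASH_SIZE];   /* zero == clear here */

int my_emulated_cas(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
    volatile AO_TS_t *lock = cas_locks + MY_HASH(addr);
    int won = 0;

    while (AO_test_and_set_acquire(lock) == AO_TS_SET) {
        /* spin; we never hold two locks, so no deadlock */
    }
    if (*addr == old_val) {
        *addr = new_val;
        won = 1;
    }
    AO_CLEAR(lock);		/* release */
    return won;
}
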
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops.h	2008-07-26 04:51:36.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops.h	2008-10-11 13:51:09.574521000 +0400
@@ -58,7 +58,7 @@
 /* 	  later writes.						*/
 /* _full: Ordered with respect to both earlier and later memops.*/
 /* _release_write: Ordered with respect to earlier writes.	*/
-/* _acquire_read: Ordered with repsect to later reads.		*/
+/* _acquire_read: Ordered with respect to later reads.		*/
 /*								*/
 /* Currently we try to define the following atomic memory 	*/
 /* operations, in combination with the above barriers:		*/
@@ -122,7 +122,7 @@
 /*								*/
 /* The architecture dependent section:				*/
 /* This defines atomic operations that have direct hardware	*/
-/* support on a particular platform, mostly by uncluding the	*/
+/* support on a particular platform, mostly by including the	*/
 /* appropriate compiler- and hardware-dependent file.  		*/
 /*								*/
 /* The synthesis section:					*/
@@ -132,7 +132,7 @@
 /* We make no attempt to synthesize operations in ways that	*/
 /* effectively introduce locks, except for the debugging/demo	*/
 /* pthread-based implementation at the beginning.  A more 	*/
-/* relistic implementation that falls back to locks could be	*/
+/* realistic implementation that falls back to locks could be	*/
 /* added as a higher layer.  But that would sacrifice		*/
 /* usability from signal handlers.				*/
 /* The synthesis section is implemented almost entirely in	*/
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.c updated/bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.c
--- bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.c	2006-07-12 03:29:30.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.c	2008-10-11 14:33:53.090146000 +0400
@@ -69,7 +69,7 @@
 #ifdef AO_USE_ALMOST_LOCK_FREE
 
 /* LIFO linked lists based on compare-and-swap.  We need to avoid	*/
-/* the case of a node deleton and reinsertion while I'm deleting	*/
+/* the case of a node deletion and reinsertion while I'm deleting	*/
-/* it, since that may cause my CAS to succeed eventhough the next	*/
+/* it, since that may cause my CAS to succeed even though the next	*/
 /* pointer is now wrong.  Our solution is not fully lock-free, but it	*/
 /* is good enough for signal handlers, provided we have a suitably low	*/
@@ -143,7 +143,7 @@
  * I concluded experimentally that checking a value first before
  * performing a compare-and-swap is usually beneficial on X86, but
  * slows things down appreciably with contention on Itanium.
- * ince the Itanium behavior makes more sense to me (more cache line
+ * Since the Itanium behavior makes more sense to me (more cache line
  * movement unless we're mostly reading, but back-off should guard
  * against that), we take Itanium as the default.  Measurements on
  * other multiprocessor architectures would be useful.  (On a uniprocessor,
@@ -234,8 +234,8 @@
 		    ( &(list -> ptr), next, (AO_t) element));
     /* This uses a narrow CAS here, an old optimization suggested	*/
     /* by Treiber.  Pop is still safe, since we run into the ABA 	*/
-    /* problem only if there were both interveining "pop"s and "push"es.*/
-    /* Inthat case we still see a change inthe version number.		*/
+    /* problem only if there were both intervening "pop"s and "push"es.	*/
+    /* In that case we still see a change in the version number.	*/
 }
 
 AO_t *AO_stack_pop_acquire(AO_stack_t *list)
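
The version-number defence against ABA, reduced to its core; a simplified
sketch built on the double-width CAS primitives (AO_val1/AO_val2 are the
field names from atomic_ops.h):

#include "atomic_ops.h"

#ifdef AO_HAVE_compare_double_and_swap_double_full
/* The head is a (version, top pointer) pair updated together, so an	*/
/* intervening pop/push bumps the version and makes our CAS fail	*/
/* instead of succeeding with a stale next pointer.			*/
AO_t *versioned_pop(volatile AO_double_t *head)
{
    AO_t version, first, next;

    do {
        version = head -> AO_val1;
        first = head -> AO_val2;
        if (first == 0) return 0;
        next = *(volatile AO_t *)first;		/* link field */
    } while (!AO_compare_double_and_swap_double_full(head,
                version, first, version + 1, next));
    return (AO_t *)first;
}
#endif
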
diff -ru bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.h updated/bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.h
--- bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.h	2006-07-12 03:29:30.000000000 +0400
+++ updated/bdwgc/libatomic_ops-1.2/src/atomic_ops_stack.h	2008-10-11 13:56:56.402646000 +0400
@@ -94,7 +94,7 @@
   volatile AO_t AO_stack_bl[AO_BL_SIZE];
 } AO_stack_aux;
 
-/* The stack implementation knows only about the lecation of 	*/
+/* The stack implementation knows only about the location of 	*/
 /* link fields in nodes, and nothing about the rest of the 	*/
 /* stack elements.  Link fields hold an AO_t, which is not	*/
 /* necessarily a real pointer.  This converts the AO_t to a 	*/
diff -ru bdwgc/mach_dep.c updated/bdwgc/mach_dep.c
--- bdwgc/mach_dep.c	2008-07-22 04:29:00.000000000 +0400
+++ updated/bdwgc/mach_dep.c	2008-10-11 13:07:07.165510500 +0400
@@ -69,7 +69,7 @@
 # endif
 
 /* Routine to mark from registers that are preserved by the C compiler. */
-/* This must be ported to every new architecture.  It is noe optional,	*/
+/* This must be ported to every new architecture.  It is not optional,	*/
 /* and should not be used on platforms that are either UNIX-like, or	*/
 /* require thread support.						*/
 
diff -ru bdwgc/mark_rts.c updated/bdwgc/mark_rts.c
--- bdwgc/mark_rts.c	2008-09-26 14:34:10.000000000 +0400
+++ updated/bdwgc/mark_rts.c	2008-10-11 13:16:08.384260500 +0400
@@ -154,11 +154,11 @@
 
 
 /* Add [b,e) to the root set.  Adding the same interval a second time	*/
-/* is a moderately fast noop, and hence benign.  We do not handle	*/
+/* is a moderately fast no-op, and hence benign.  We do not handle	*/
 /* different but overlapping intervals efficiently.  (We do handle	*/
 /* them correctly.)							*/
 /* Tmp specifies that the interval may be deleted before 		*/
-/* reregistering dynamic libraries.					*/ 
+/* re-registering dynamic libraries.					*/ 
 void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
 {
     struct roots * old;
@@ -563,7 +563,7 @@
      * Next push static data.  This must happen early on, since it's
      * not robust against mark stack overflow.
      */
-     /* Reregister dynamic libraries, in case one got added.		*/
+     /* Re-register dynamic libraries, in case one got added.		*/
      /* There is some argument for doing this as late as possible,	*/
      /* especially on win32, where it can change asynchronously.	*/
      /* In those cases, we do it here.  But on other platforms, it's	*/
diff -ru bdwgc/misc.c updated/bdwgc/misc.c
--- bdwgc/misc.c	2008-09-27 00:03:52.000000000 +0400
+++ updated/bdwgc/misc.c	2008-10-11 13:23:28.337385500 +0400
@@ -134,7 +134,7 @@
 /* but not too much bigger						*/
 /* and so that size_map contains relatively few distinct entries 	*/
 /* This was originally stolen from Russ Atkinson's Cedar		*/
-/* quantization alogrithm (but we precompute it).			*/ 
+/* quantization algorithm (but we precompute it).			*/ 
 STATIC void GC_init_size_map(void)
 {
     int i;
@@ -365,7 +365,7 @@
 
 
 /* Return the size of an object, given a pointer to its base.		*/
-/* (For small obects this also happens to work from interior pointers,	*/
+/* (For small objects this also happens to work from interior pointers,	*/
 /* but that shouldn't be relied upon.)					*/
 GC_API size_t GC_size(void * p)
 {
diff -ru bdwgc/os_dep.c updated/bdwgc/os_dep.c
--- bdwgc/os_dep.c	2008-09-27 00:02:36.000000000 +0400
+++ updated/bdwgc/os_dep.c	2008-10-11 13:43:40.087385500 +0400
@@ -166,7 +166,7 @@
 
 #ifdef THREADS
 /* Determine the length of a file by incrementally reading it into a 	*/
-/* This would be sily to use on a file supporting lseek, but Linux	*/
+/* This would be silly to use on a file supporting lseek, but Linux	*/
 /* /proc files usually do not.						*/
 STATIC size_t GC_get_file_len(int f)
 {
@@ -438,7 +438,7 @@
     /* Some Linux distributions arrange to define __data_start.  Some	*/
     /* define data_start as a weak symbol.  The latter is technically	*/
     /* broken, since the user program may define data_start, in which	*/
-    /* case we lose.  Nonetheless, we try both, prefering __data_start.	*/
+    /* case we lose.  Nonetheless, we try both, preferring __data_start.*/
     /* We assume gcc-compatible pragmas.	*/
 #   pragma weak __data_start
     extern int __data_start[];
@@ -800,7 +800,7 @@
 #       endif
     }
 
-    /* Return the first nonaddressible location > p (up) or 	*/
+    /* Return the first non-addressable location > p (up) or 	*/
     /* the smallest location q s.t. [q,p) is addressable (!up).	*/
     /* We assume that p (up) or p-1 (!up) is addressable.	*/
     /* Requires allocation lock.				*/
@@ -1247,8 +1247,8 @@
   /* Unfortunately, we have to handle win32s very differently from NT, 	*/
   /* Since VirtualQuery has very different semantics.  In particular,	*/
   /* under win32s a VirtualQuery call on an unmapped page returns an	*/
-  /* invalid result.  Under NT, GC_register_data_segments is a noop and	*/
-  /* all real work is done by GC_register_dynamic_libraries.  Under	*/
+  /* invalid result.  Under NT, GC_register_data_segments is a no-op	*/
+  /* and all real work is done by GC_register_dynamic_libraries.  Under	*/
   /* win32s, we cannot find the data segments associated with dll's.	*/
   /* We register the main data segment here.				*/
   GC_bool GC_no_win32_dlls = FALSE;	 
@@ -2125,7 +2125,7 @@
 }
 
 /* Push the contents of an old object. We treat this as stack	*/
-/* data only becasue that makes it robust against mark stack	*/
+/* data only because that makes it robust against mark stack	*/
 /* overflow.							*/
 PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
 {
@@ -2369,7 +2369,7 @@
 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer?	*/
 /* If the actual page size is different, this returns TRUE if any	*/
 /* of the pages overlapping h are dirty.  This routine may err on the	*/
-/* side of labelling pages as dirty (and this implementation does).	*/
+/* side of labeling pages as dirty (and this implementation does).	*/
 /*ARGSUSED*/
 GC_bool GC_page_was_dirty(struct hblk *h)
 {
@@ -2427,7 +2427,7 @@
 /* Is the HBLKSIZE sized page at h marked dirty in the local buffer?	*/
 /* If the actual page size is different, this returns TRUE if any	*/
 /* of the pages overlapping h are dirty.  This routine may err on the	*/
-/* side of labelling pages as dirty (and this implementation does).	*/
+/* side of labeling pages as dirty (and this implementation does).	*/
 GC_bool GC_page_was_dirty(struct hblk *h)
 {
     register word index;
@@ -3134,7 +3134,7 @@
  */
  
 /*
- * This implementaion assumes a Solaris 2.X like /proc pseudo-file-system
+ * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
  * from which we can read page modified bits.  This facility is far from
  * optimal (e.g. we would like to get the info for only some of the
  * address space), but it avoids intercepting system calls.
@@ -3571,7 +3571,7 @@
 }
 
 /* All this SIGBUS code shouldn't be necessary. All protection faults should
-   be going throught the mach exception handler. However, it seems a SIGBUS is
+   be going through the mach exception handler. However, it seems a SIGBUS is
    occasionally sent for some unknown reason. Even more odd, it seems to be
    meaningless and safe to ignore. */
 #ifdef BROKEN_EXCEPTION_HANDLING
@@ -3606,7 +3606,7 @@
   exception_mask_t mask;
 
   if (GC_print_stats == VERBOSE)
-    GC_log_printf("Inititalizing mach/darwin mprotect virtual dirty bit "
+    GC_log_printf("Initializing mach/darwin mprotect virtual dirty bit "
 		  "implementation\n");
 # ifdef BROKEN_EXCEPTION_HANDLING
     WARN("Enabling workarounds for various darwin "
@@ -3813,7 +3813,7 @@
       /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
 	 KERN_PROTECTION_FAILURE every once and a while. We wait till we get
 	 a bunch in a row before doing anything about it. If a "real" fault
-	 ever occurres it'll just keep faulting over and over and we'll hit
+	 ever occurs it'll just keep faulting over and over and we'll hit
 	 the limit pretty quickly. */
 #     ifdef BROKEN_EXCEPTION_HANDLING
         static char *last_fault;
diff -ru bdwgc/pthread_stop_world.c updated/bdwgc/pthread_stop_world.c
--- bdwgc/pthread_stop_world.c	2008-07-26 04:51:34.000000000 +0400
+++ updated/bdwgc/pthread_stop_world.c	2008-10-11 11:23:08.000000000 +0400
@@ -535,7 +535,7 @@
     	ABORT("Cannot set SIG_THR_RESTART handler");
     }
 
-    /* Inititialize suspend_handler_mask. It excludes SIG_THR_RESTART. */
+    /* Initialize suspend_handler_mask.  It excludes SIG_THR_RESTART. */
       if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
       GC_remove_allowed_signals(&suspend_handler_mask);
       if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
diff -ru bdwgc/pthread_support.c updated/bdwgc/pthread_support.c
--- bdwgc/pthread_support.c	2008-08-22 01:06:56.000000000 +0400
+++ updated/bdwgc/pthread_support.c	2008-10-11 13:26:05.665510500 +0400
@@ -21,7 +21,7 @@
  * guaranteed by the pthread standard, though it now does
  * very little of that.  It now also supports NPTL, and many
  * other Posix thread implementations.  We are trying to merge
- * all flavors of pthread dupport code into this file.
+ * all flavors of pthread support code into this file.
  */
  /* DG/UX ix86 support <takis at xfree86.org> */
 /*
diff -ru bdwgc/thread_local_alloc.c updated/bdwgc/thread_local_alloc.c
--- bdwgc/thread_local_alloc.c	2008-09-26 13:13:32.000000000 +0400
+++ updated/bdwgc/thread_local_alloc.c	2008-10-11 14:49:26.077419500 +0400
@@ -225,8 +225,8 @@
 /* fundamental issue is that we may end up marking a free list, which	*/
 /* has freelist links instead of "vtable" pointers.  That is usually	*/
 /* OK, since the next object on the free list will be cleared, and	*/
-/* will thus be interpreted as containg a zero descriptor.  That's fine	*/
-/* if the object has not yet been initialized.  But there are		*/
+/* will thus be interpreted as containing a zero descriptor.  That's	*/
+/* fine if the object has not yet been initialized.  But there are	*/
 /* interesting potential races.						*/
 /* In the case of incremental collection, this seems hopeless, since	*/
 /* the marker may run asynchronously, and may pick up the pointer to  	*/
diff -ru bdwgc/typd_mlc.c updated/bdwgc/typd_mlc.c
--- bdwgc/typd_mlc.c	2008-09-26 13:09:54.000000000 +0400
+++ updated/bdwgc/typd_mlc.c	2008-10-11 13:16:38.040510500 +0400
@@ -28,7 +28,7 @@
  * must trace the complex_descriptor.
  *
  * Note that descriptors inside objects may appear cleared, if we encounter a
- * false refrence to an object on a free list.  In the GC_descr case, this
+ * false reference to an object on a free list.  In the GC_descr case, this
  * is OK, since a 0 descriptor corresponds to examining no fields.
  * In the complex_descriptor case, we explicitly check for that case.
  *
diff -ru bdwgc/win32_threads.c updated/bdwgc/win32_threads.c
--- bdwgc/win32_threads.c	2008-10-03 22:36:26.000000000 +0400
+++ updated/bdwgc/win32_threads.c	2008-10-11 14:44:24.874294500 +0400
@@ -316,7 +316,7 @@
 {
   GC_vthread me;
 
-  /* The following should be a noop according to the win32	*/
+  /* The following should be a no-op according to the win32	*/
   /* documentation.  There is empirical evidence that it	*/
   /* isn't.		- HB					*/
 # if defined(MPROTECT_VDB)
@@ -331,7 +331,7 @@
   if (GC_win32_dll_threads) {
     int i;
     /* It appears to be unsafe to acquire a lock here, since this	*/
-    /* code is apparently not preeemptible on some systems.		*/
+    /* code is apparently not preemptible on some systems.		*/
     /* (This is based on complaints, not on Microsoft's official	*/
     /* documentation, which says this should perform "only simple	*/
     /* initialization tasks".)						*/
@@ -1508,7 +1508,7 @@
 #else /* !GC_PTHREADS */
 
 /*
- * We avoid acquiring locks here, since this doesn't seem to be preemptable.
+ * We avoid acquiring locks here, since this doesn't seem to be preemptible.
  * This may run with an uninitialized collector, in which case we don't do much.
  * This implies that no threads other than the main one should be created
  * with an uninitialized collector.  (The alternative of initializing

