Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

cygwin.com/git/newlib-cygwin.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
Diffstat (limited to 'newlib/libc/stdlib')
-rw-r--r--newlib/libc/stdlib/Makefile.am98
-rw-r--r--newlib/libc/stdlib/Makefile.in94
-rw-r--r--newlib/libc/stdlib/__atexit.c20
-rw-r--r--newlib/libc/stdlib/__call_atexit.c10
-rw-r--r--newlib/libc/stdlib/ecvtbuf.c28
-rw-r--r--newlib/libc/stdlib/mblen.c7
-rw-r--r--newlib/libc/stdlib/mbrlen.c6
-rw-r--r--newlib/libc/stdlib/mbrtowc.c11
-rw-r--r--newlib/libc/stdlib/mbtowc.c7
-rw-r--r--newlib/libc/stdlib/nano-mallocr.c581
-rw-r--r--newlib/libc/stdlib/rand.c16
-rw-r--r--newlib/libc/stdlib/strtod.c155
-rw-r--r--newlib/libc/stdlib/wcrtomb.c11
-rw-r--r--newlib/libc/stdlib/wctob.c6
-rw-r--r--newlib/libc/stdlib/wctomb.c8
15 files changed, 899 insertions(+), 159 deletions(-)
diff --git a/newlib/libc/stdlib/Makefile.am b/newlib/libc/stdlib/Makefile.am
index 38120e6e2..22fedcc84 100644
--- a/newlib/libc/stdlib/Makefile.am
+++ b/newlib/libc/stdlib/Makefile.am
@@ -66,12 +66,40 @@ GENERAL_SOURCES += \
wcstold.c
endif # HAVE_LONG_DOUBLE
+if NEWLIB_NANO_MALLOC
+MALIGNR=nano-malignr
+MALLOPTR=nano-malloptr
+PVALLOCR=nano-pvallocr
+VALLOCR=nano-vallocr
+FREER=nano-freer
+REALLOCR=nano-reallocr
+CALLOCR=nano-callocr
+CFREER=nano-cfreer
+MALLINFOR=nano-mallinfor
+MALLSTATSR=nano-mallstatsr
+MSIZER=nano-msizer
+MALLOCR=nano-mallocr
+else
+MALIGNR=malignr
+MALLOPTR=malloptr
+PVALLOCR=pvallocr
+VALLOCR=vallocr
+FREER=freer
+REALLOCR=reallocr
+CALLOCR=callocr
+CFREER=cfreer
+MALLINFOR=mallinfor
+MALLSTATSR=mallstatsr
+MSIZER=msizer
+MALLOCR=mallocr
+endif
+
EXTENDED_SOURCES = \
cxa_atexit.c \
cxa_finalize.c \
drand48.c \
- ecvtbuf.c \
- efgcvt.c \
+ ecvtbuf.c \
+ efgcvt.c \
erand48.c \
jrand48.c \
lcong48.c \
@@ -115,10 +143,10 @@ ELIX_2_SOURCES = \
wctob.c
ELIX_2_OBJS = \
- $(lpfx)malignr.$(oext) \
- $(lpfx)malloptr.$(oext) \
- $(lpfx)pvallocr.$(oext) \
- $(lpfx)vallocr.$(oext)
+ $(lpfx)$(MALIGNR).$(oext) \
+ $(lpfx)$(MALLOPTR).$(oext) \
+ $(lpfx)$(PVALLOCR).$(oext) \
+ $(lpfx)$(VALLOCR).$(oext)
ELIX_3_SOURCES = \
putenv.c \
@@ -148,10 +176,10 @@ endif
endif
# Because of how libtool moves objects around, mallocr must be built last.
-LIBADD_OBJS = $(lpfx)freer.$(oext) $(lpfx)reallocr.$(oext) \
- $(lpfx)callocr.$(oext) $(lpfx)cfreer.$(oext) \
- $(lpfx)mallinfor.$(oext) $(lpfx)mallstatsr.$(oext) \
- $(lpfx)msizer.$(oext) $(lpfx)mallocr.$(oext)
+LIBADD_OBJS = $(lpfx)$(FREER).$(oext) $(lpfx)$(REALLOCR).$(oext) \
+ $(lpfx)$(CALLOCR).$(oext) $(lpfx)$(CFREER).$(oext) \
+ $(lpfx)$(MALLINFOR).$(oext) $(lpfx)$(MALLSTATSR).$(oext) \
+ $(lpfx)$(MSIZER).$(oext) $(lpfx)$(MALLOCR).$(oext)
libstdlib_la_LDFLAGS = -Xcompiler -nostdlib
@@ -176,41 +204,41 @@ include $(srcdir)/../../Makefile.shared
MALLOC_COMPILE = $(LIB_COMPILE) -DINTERNAL_NEWLIB
-$(lpfx)mallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)freer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_FREE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(FREER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_FREE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)reallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_REALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(REALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_REALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)callocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_CALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(CALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_CALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)cfreer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_CFREE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(CFREER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_CFREE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)malignr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MEMALIGN -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALIGNR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MEMALIGN -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)vallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_VALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(VALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_VALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)pvallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_PVALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(PVALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_PVALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)mallinfor.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLINFO -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLINFOR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLINFO -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)mallstatsr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC_STATS -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLSTATSR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC_STATS -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)msizer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC_USABLE_SIZE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MSIZER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC_USABLE_SIZE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)malloptr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOPT -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLOPTR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOPT -c $(srcdir)/$(MALLOCR).c -o $@
CHEWOUT_FILES= \
_Exit.def \
@@ -234,7 +262,7 @@ CHEWOUT_FILES= \
llabs.def \
lldiv.def \
malloc.def \
- mallocr.def \
+ $(MALLOCR).def \
mblen.def \
mbsnrtowcs.def \
mbstowcs.def \
diff --git a/newlib/libc/stdlib/Makefile.in b/newlib/libc/stdlib/Makefile.in
index 33e5f260d..319079dd5 100644
--- a/newlib/libc/stdlib/Makefile.in
+++ b/newlib/libc/stdlib/Makefile.in
@@ -361,12 +361,36 @@ GENERAL_SOURCES = __adjust.c __atexit.c __call_atexit.c __exp10.c \
reallocf.c sb_charsets.c strtod.c strtol.c strtoul.c wcstod.c \
wcstol.c wcstoul.c wcstombs.c wcstombs_r.c wctomb.c wctomb_r.c \
$(am__append_1)
+@NEWLIB_NANO_MALLOC_FALSE@MALIGNR = malignr
+@NEWLIB_NANO_MALLOC_TRUE@MALIGNR = nano-malignr
+@NEWLIB_NANO_MALLOC_FALSE@MALLOPTR = malloptr
+@NEWLIB_NANO_MALLOC_TRUE@MALLOPTR = nano-malloptr
+@NEWLIB_NANO_MALLOC_FALSE@PVALLOCR = pvallocr
+@NEWLIB_NANO_MALLOC_TRUE@PVALLOCR = nano-pvallocr
+@NEWLIB_NANO_MALLOC_FALSE@VALLOCR = vallocr
+@NEWLIB_NANO_MALLOC_TRUE@VALLOCR = nano-vallocr
+@NEWLIB_NANO_MALLOC_FALSE@FREER = freer
+@NEWLIB_NANO_MALLOC_TRUE@FREER = nano-freer
+@NEWLIB_NANO_MALLOC_FALSE@REALLOCR = reallocr
+@NEWLIB_NANO_MALLOC_TRUE@REALLOCR = nano-reallocr
+@NEWLIB_NANO_MALLOC_FALSE@CALLOCR = callocr
+@NEWLIB_NANO_MALLOC_TRUE@CALLOCR = nano-callocr
+@NEWLIB_NANO_MALLOC_FALSE@CFREER = cfreer
+@NEWLIB_NANO_MALLOC_TRUE@CFREER = nano-cfreer
+@NEWLIB_NANO_MALLOC_FALSE@MALLINFOR = mallinfor
+@NEWLIB_NANO_MALLOC_TRUE@MALLINFOR = nano-mallinfor
+@NEWLIB_NANO_MALLOC_FALSE@MALLSTATSR = mallstatsr
+@NEWLIB_NANO_MALLOC_TRUE@MALLSTATSR = nano-mallstatsr
+@NEWLIB_NANO_MALLOC_FALSE@MSIZER = msizer
+@NEWLIB_NANO_MALLOC_TRUE@MSIZER = nano-msizer
+@NEWLIB_NANO_MALLOC_FALSE@MALLOCR = mallocr
+@NEWLIB_NANO_MALLOC_TRUE@MALLOCR = nano-mallocr
EXTENDED_SOURCES = \
cxa_atexit.c \
cxa_finalize.c \
drand48.c \
- ecvtbuf.c \
- efgcvt.c \
+ ecvtbuf.c \
+ efgcvt.c \
erand48.c \
jrand48.c \
lcong48.c \
@@ -410,10 +434,10 @@ ELIX_2_SOURCES = \
wctob.c
ELIX_2_OBJS = \
- $(lpfx)malignr.$(oext) \
- $(lpfx)malloptr.$(oext) \
- $(lpfx)pvallocr.$(oext) \
- $(lpfx)vallocr.$(oext)
+ $(lpfx)$(MALIGNR).$(oext) \
+ $(lpfx)$(MALLOPTR).$(oext) \
+ $(lpfx)$(PVALLOCR).$(oext) \
+ $(lpfx)$(VALLOCR).$(oext)
ELIX_3_SOURCES = \
putenv.c \
@@ -434,10 +458,10 @@ ELIX_4_SOURCES = \
@ELIX_LEVEL_1_TRUE@ELIX_OBJS =
# Because of how libtool moves objects around, mallocr must be built last.
-LIBADD_OBJS = $(lpfx)freer.$(oext) $(lpfx)reallocr.$(oext) \
- $(lpfx)callocr.$(oext) $(lpfx)cfreer.$(oext) \
- $(lpfx)mallinfor.$(oext) $(lpfx)mallstatsr.$(oext) \
- $(lpfx)msizer.$(oext) $(lpfx)mallocr.$(oext)
+LIBADD_OBJS = $(lpfx)$(FREER).$(oext) $(lpfx)$(REALLOCR).$(oext) \
+ $(lpfx)$(CALLOCR).$(oext) $(lpfx)$(CFREER).$(oext) \
+ $(lpfx)$(MALLINFOR).$(oext) $(lpfx)$(MALLSTATSR).$(oext) \
+ $(lpfx)$(MSIZER).$(oext) $(lpfx)$(MALLOCR).$(oext)
libstdlib_la_LDFLAGS = -Xcompiler -nostdlib
@USE_LIBTOOL_TRUE@noinst_LTLIBRARIES = libstdlib.la
@@ -476,7 +500,7 @@ CHEWOUT_FILES = \
llabs.def \
lldiv.def \
malloc.def \
- mallocr.def \
+ $(MALLOCR).def \
mblen.def \
mbsnrtowcs.def \
mbstowcs.def \
@@ -1388,41 +1412,41 @@ objectlist.awk.in: $(noinst_LTLIBRARIES)
echo $$i `pwd`/$$i >> objectlist.awk.in ; \
done
-$(lpfx)mallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)freer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_FREE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(FREER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_FREE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)reallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_REALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(REALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_REALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)callocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_CALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(CALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_CALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)cfreer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_CFREE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(CFREER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_CFREE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)malignr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MEMALIGN -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALIGNR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MEMALIGN -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)vallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_VALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(VALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_VALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)pvallocr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_PVALLOC -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(PVALLOCR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_PVALLOC -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)mallinfor.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLINFO -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLINFOR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLINFO -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)mallstatsr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC_STATS -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLSTATSR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC_STATS -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)msizer.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOC_USABLE_SIZE -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MSIZER).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOC_USABLE_SIZE -c $(srcdir)/$(MALLOCR).c -o $@
-$(lpfx)malloptr.$(oext): mallocr.c
- $(MALLOC_COMPILE) -DDEFINE_MALLOPT -c $(srcdir)/mallocr.c -o $@
+$(lpfx)$(MALLOPTR).$(oext): $(MALLOCR).c
+ $(MALLOC_COMPILE) -DDEFINE_MALLOPT -c $(srcdir)/$(MALLOCR).c -o $@
.c.def:
$(CHEW) < $< > $*.def 2> $*.ref
diff --git a/newlib/libc/stdlib/__atexit.c b/newlib/libc/stdlib/__atexit.c
index 4687d0023..f04f00d8e 100644
--- a/newlib/libc/stdlib/__atexit.c
+++ b/newlib/libc/stdlib/__atexit.c
@@ -10,7 +10,17 @@
/* Make this a weak reference to avoid pulling in malloc. */
void * malloc(size_t) _ATTRIBUTE((__weak__));
-__LOCK_INIT_RECURSIVE(, __atexit_lock);
+
+#ifndef __SINGLE_THREAD__
+extern _LOCK_RECURSIVE_T __atexit_lock;
+#endif
+
+#ifdef _REENT_GLOBAL_ATEXIT
+static struct _atexit _global_atexit0 = _ATEXIT_INIT;
+# define _GLOBAL_ATEXIT0 (&_global_atexit0)
+#else
+# define _GLOBAL_ATEXIT0 (&_GLOBAL_REENT->_atexit0)
+#endif
/*
* Register a function to be performed at exit or on shared library unload.
@@ -31,9 +41,9 @@ _DEFUN (__register_exitproc,
__lock_acquire_recursive(__atexit_lock);
#endif
- p = _GLOBAL_REENT->_atexit;
+ p = _GLOBAL_ATEXIT;
if (p == NULL)
- _GLOBAL_REENT->_atexit = p = &_GLOBAL_REENT->_atexit0;
+ _GLOBAL_ATEXIT = p = _GLOBAL_ATEXIT0;
if (p->_ind >= _ATEXIT_SIZE)
{
#ifndef _ATEXIT_DYNAMIC_ALLOC
@@ -53,8 +63,8 @@ _DEFUN (__register_exitproc,
return -1;
}
p->_ind = 0;
- p->_next = _GLOBAL_REENT->_atexit;
- _GLOBAL_REENT->_atexit = p;
+ p->_next = _GLOBAL_ATEXIT;
+ _GLOBAL_ATEXIT = p;
#ifndef _REENT_SMALL
p->_on_exit_args._fntypes = 0;
p->_on_exit_args._is_cxa = 0;
diff --git a/newlib/libc/stdlib/__call_atexit.c b/newlib/libc/stdlib/__call_atexit.c
index 4c4506343..1e6e71044 100644
--- a/newlib/libc/stdlib/__call_atexit.c
+++ b/newlib/libc/stdlib/__call_atexit.c
@@ -11,8 +11,10 @@
/* Make this a weak reference to avoid pulling in free. */
void free(void *) _ATTRIBUTE((__weak__));
-#ifndef __SINGLE_THREAD__
-extern _LOCK_RECURSIVE_T __atexit_lock;
+__LOCK_INIT_RECURSIVE(, __atexit_lock);
+
+#ifdef _REENT_GLOBAL_ATEXIT
+struct _atexit *_global_atexit = _NULL;
#endif
#ifdef _WANT_REGISTER_FINI
@@ -78,8 +80,8 @@ _DEFUN (__call_exitprocs, (code, d),
restart:
- p = _GLOBAL_REENT->_atexit;
- lastp = &_GLOBAL_REENT->_atexit;
+ p = _GLOBAL_ATEXIT;
+ lastp = &_GLOBAL_ATEXIT;
while (p)
{
#ifdef _REENT_SMALL
diff --git a/newlib/libc/stdlib/ecvtbuf.c b/newlib/libc/stdlib/ecvtbuf.c
index 2b9b9eb8a..feaa33fd0 100644
--- a/newlib/libc/stdlib/ecvtbuf.c
+++ b/newlib/libc/stdlib/ecvtbuf.c
@@ -233,6 +233,7 @@ _DEFUN (fcvtbuf, (invalue, ndigit, decpt, sign, fcvt_buf),
int *sign _AND
char *fcvt_buf)
{
+ struct _reent *reent = _REENT;
char *save;
char *p;
char *end;
@@ -240,27 +241,27 @@ _DEFUN (fcvtbuf, (invalue, ndigit, decpt, sign, fcvt_buf),
if (fcvt_buf == NULL)
{
- if (_REENT->_cvtlen <= ndigit + 35)
+ if (reent->_cvtlen <= ndigit + 35)
{
- if ((fcvt_buf = (char *) _realloc_r (_REENT, _REENT->_cvtbuf,
+ if ((fcvt_buf = (char *) _realloc_r (reent, reent->_cvtbuf,
ndigit + 36)) == NULL)
return NULL;
- _REENT->_cvtlen = ndigit + 36;
- _REENT->_cvtbuf = fcvt_buf;
+ reent->_cvtlen = ndigit + 36;
+ reent->_cvtbuf = fcvt_buf;
}
- fcvt_buf = _REENT->_cvtbuf ;
+ fcvt_buf = reent->_cvtbuf ;
}
save = fcvt_buf;
if (invalue < 1.0 && invalue > -1.0)
{
- p = _dtoa_r (_REENT, invalue, 2, ndigit, decpt, sign, &end);
+ p = _dtoa_r (reent, invalue, 2, ndigit, decpt, sign, &end);
}
else
{
- p = _dtoa_r (_REENT, invalue, 3, ndigit, decpt, sign, &end);
+ p = _dtoa_r (reent, invalue, 3, ndigit, decpt, sign, &end);
}
/* Now copy */
@@ -289,6 +290,7 @@ _DEFUN (ecvtbuf, (invalue, ndigit, decpt, sign, fcvt_buf),
int *sign _AND
char *fcvt_buf)
{
+ struct _reent *reent = _REENT;
char *save;
char *p;
char *end;
@@ -296,21 +298,21 @@ _DEFUN (ecvtbuf, (invalue, ndigit, decpt, sign, fcvt_buf),
if (fcvt_buf == NULL)
{
- if (_REENT->_cvtlen <= ndigit)
+ if (reent->_cvtlen <= ndigit)
{
- if ((fcvt_buf = (char *) _realloc_r (_REENT, _REENT->_cvtbuf,
+ if ((fcvt_buf = (char *) _realloc_r (reent, reent->_cvtbuf,
ndigit + 1)) == NULL)
return NULL;
- _REENT->_cvtlen = ndigit + 1;
- _REENT->_cvtbuf = fcvt_buf;
+ reent->_cvtlen = ndigit + 1;
+ reent->_cvtbuf = fcvt_buf;
}
- fcvt_buf = _REENT->_cvtbuf ;
+ fcvt_buf = reent->_cvtbuf ;
}
save = fcvt_buf;
- p = _dtoa_r (_REENT, invalue, 2, ndigit, decpt, sign, &end);
+ p = _dtoa_r (reent, invalue, 2, ndigit, decpt, sign, &end);
/* Now copy */
diff --git a/newlib/libc/stdlib/mblen.c b/newlib/libc/stdlib/mblen.c
index ace23889b..4d9ac38bd 100644
--- a/newlib/libc/stdlib/mblen.c
+++ b/newlib/libc/stdlib/mblen.c
@@ -55,11 +55,12 @@ _DEFUN (mblen, (s, n),
{
#ifdef _MB_CAPABLE
int retval = 0;
+ struct _reent *reent = _REENT;
mbstate_t *state;
- _REENT_CHECK_MISC(_REENT);
- state = &(_REENT_MBLEN_STATE(_REENT));
- retval = __mbtowc (_REENT, NULL, s, n, __locale_charset (), state);
+ _REENT_CHECK_MISC(reent);
+ state = &(_REENT_MBLEN_STATE(reent));
+ retval = __mbtowc (reent, NULL, s, n, __locale_charset (), state);
if (retval < 0)
{
state->__count = 0;
diff --git a/newlib/libc/stdlib/mbrlen.c b/newlib/libc/stdlib/mbrlen.c
index ac9aa324f..4319875a1 100644
--- a/newlib/libc/stdlib/mbrlen.c
+++ b/newlib/libc/stdlib/mbrlen.c
@@ -11,8 +11,10 @@ mbrlen(const char *s, size_t n, mbstate_t *ps)
#ifdef _MB_CAPABLE
if (ps == NULL)
{
- _REENT_CHECK_MISC(_REENT);
- ps = &(_REENT_MBRLEN_STATE(_REENT));
+ struct _reent *reent = _REENT;
+
+ _REENT_CHECK_MISC(reent);
+ ps = &(_REENT_MBRLEN_STATE(reent));
}
#endif
diff --git a/newlib/libc/stdlib/mbrtowc.c b/newlib/libc/stdlib/mbrtowc.c
index e191e1158..0a8c23a9d 100644
--- a/newlib/libc/stdlib/mbrtowc.c
+++ b/newlib/libc/stdlib/mbrtowc.c
@@ -52,24 +52,25 @@ _DEFUN (mbrtowc, (pwc, s, n, ps),
return _mbrtowc_r (_REENT, pwc, s, n, ps);
#else
int retval = 0;
+ struct _reent *reent = _REENT;
#ifdef _MB_CAPABLE
if (ps == NULL)
{
- _REENT_CHECK_MISC(_REENT);
- ps = &(_REENT_MBRTOWC_STATE(_REENT));
+ _REENT_CHECK_MISC(reent);
+ ps = &(_REENT_MBRTOWC_STATE(reent));
}
#endif
if (s == NULL)
- retval = __mbtowc (_REENT, NULL, "", 1, __locale_charset (), ps);
+ retval = __mbtowc (reent, NULL, "", 1, __locale_charset (), ps);
else
- retval = __mbtowc (_REENT, pwc, s, n, __locale_charset (), ps);
+ retval = __mbtowc (reent, pwc, s, n, __locale_charset (), ps);
if (retval == -1)
{
ps->__count = 0;
- _REENT->_errno = EILSEQ;
+ reent->_errno = EILSEQ;
return (size_t)(-1);
}
else
diff --git a/newlib/libc/stdlib/mbtowc.c b/newlib/libc/stdlib/mbtowc.c
index 83b6a0eda..8da309e2d 100644
--- a/newlib/libc/stdlib/mbtowc.c
+++ b/newlib/libc/stdlib/mbtowc.c
@@ -64,12 +64,13 @@ _DEFUN (mbtowc, (pwc, s, n),
{
#ifdef _MB_CAPABLE
int retval = 0;
+ struct _reent *reent = _REENT;
mbstate_t *ps;
- _REENT_CHECK_MISC(_REENT);
- ps = &(_REENT_MBTOWC_STATE(_REENT));
+ _REENT_CHECK_MISC(reent);
+ ps = &(_REENT_MBTOWC_STATE(reent));
- retval = __mbtowc (_REENT, pwc, s, n, __locale_charset (), ps);
+ retval = __mbtowc (reent, pwc, s, n, __locale_charset (), ps);
if (retval < 0)
{
diff --git a/newlib/libc/stdlib/nano-mallocr.c b/newlib/libc/stdlib/nano-mallocr.c
new file mode 100644
index 000000000..e0a919590
--- /dev/null
+++ b/newlib/libc/stdlib/nano-mallocr.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2012, 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Implementation of <<malloc>> <<free>> <<calloc>> <<realloc>>, optional
+ * as to be reenterable.
+ *
+ * Interface documentation refer to malloc.c.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#if DEBUG
+#include <assert.h>
+#else
+#define assert(x) ((void)0)
+#endif
+
+#ifndef MAX
+#define MAX(a,b) ((a) >= (b) ? (a) : (b))
+#endif
+
+#ifdef INTERNAL_NEWLIB
+
+#include <sys/config.h>
+#include <reent.h>
+
+#define RARG struct _reent *reent_ptr,
+#define RONEARG struct _reent *reent_ptr
+#define RCALL reent_ptr,
+
+/* Disable MALLOC_LOCK so far. So it won't be thread safe */
+#define MALLOC_LOCK /*__malloc_lock(reent_ptr) */
+#define MALLOC_UNLOCK /*__malloc_unlock(reent_ptr) */
+
+#define RERRNO reent_ptr->_errno
+
+#define nano_malloc _malloc_r
+#define nano_free _free_r
+#define nano_realloc _realloc_r
+#define nano_memalign _memalign_r
+#define nano_valloc _valloc_r
+#define nano_pvalloc _pvalloc_r
+#define nano_calloc _calloc_r
+#define nano_cfree _cfree_r
+#define nano_malloc_usable_size _malloc_usable_size_r
+#define nano_malloc_stats _malloc_stats_r
+#define nano_mallinfo _mallinfo_r
+#define nano_allopt _mallopt_r
+
+#else /* ! INTERNAL_NEWLIB */
+
+#define RARG
+#define RONEARG
+#define RCALL
+#define MALLOC_LOCK
+#define MALLOC_UNLOCK
+#define RERRNO errno
+
+#define nano_malloc malloc
+#define nano_free free
+#define nano_realloc realloc
+#define nano_memalign memalign
+#define nano_valloc valloc
+#define nano_pvalloc pvalloc
+#define nano_calloc calloc
+#define nano_cfree cfree
+#define nano_malloc_usable_size malloc_usable_size
+#define nano_malloc_stats malloc_stats
+#define nano_mallinfo mallinfo
+#define nano_allopt mallopt
+#endif /* ! INTERNAL_NEWLIB */
+
+/* Define free_list as internal name to avoid conflict with user names */
+#define free_list __malloc_free_list
+
+#define ALIGN_TO(size, align) \
+ (((size) + (align) -1) & ~((align) -1))
+
+/* Alignment of allocated block */
+#define MALLOC_ALIGN (8U)
+#define CHUNK_ALIGN (sizeof(void*))
+#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN)
+
+/* as well as the minimal allocation size
+ * to hold a free pointer */
+#define MALLOC_MINSIZE (sizeof(void *))
+#define MALLOC_PAGE_ALIGN (0x1000)
+#define MAX_ALLOC_SIZE (0x80000000U)
+
+typedef size_t malloc_size_t;
+
+typedef struct malloc_chunk
+{
+ /* ------------------
+ * chunk->| size (4 bytes) |
+ * ------------------
+ * | Padding for |
+ * | alignment |
+ * | holding neg |
+ * | offset to size |
+ * ------------------
+ * mem_ptr->| point to next |
+ * | free when freed|
+ * | or data load |
+ * | when allocated |
+ * ------------------
+ */
+ /* size of the allocated payload area, including size before
+ CHUNK_OFFSET */
+ int size;
+
+ /* since here, the memory is either the next free block, or data load */
+ struct malloc_chunk * next;
+}chunk;
+
+#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next)))
+
+/* size of smallest possible chunk. A memory piece smaller than this size
+ * won't be able to create a chunk */
+#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE)
+
+static chunk * get_chunk_from_ptr(void * ptr)
+{
+ chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
+ /* Skip the padding area */
+ if (c->size < 0) c = (chunk *)((char *)c + c->size);
+ return c;
+}
+
+#ifdef DEFINE_MALLOC
+chunk * free_list = NULL;
+
+/** Function sbrk_aligned
+ * Algorithm:
+ * Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned
+ * Optimise for the case that it is already aligned - only ask for extra
+ * padding after we know we need it
+ */
+static void* sbrk_aligned(RARG malloc_size_t s)
+{
+ char *p, *align_p;
+
+ p = _sbrk_r(RCALL s);
+
+ /* sbrk returns -1 if fail to allocate */
+ if (p == (void *)-1)
+ return p;
+
+ align_p = (char*)ALIGN_TO((unsigned long)p, CHUNK_ALIGN);
+ if (align_p != p)
+ {
+ /* p is not aligned, ask for a few more bytes so that we have s
+ * bytes reserved from align_p. */
+ p = _sbrk_r(RCALL align_p - p);
+ if (p == (void *)-1)
+ return p;
+ }
+ return align_p;
+}
+
+/** Function nano_malloc
+ * Algorithm:
+ * Walk through the free list to find the first match. If fails to find
+ * one, call sbrk to allocate a new chunk.
+ */
+void * nano_malloc(RARG malloc_size_t s)
+{
+ chunk *p, *r;
+ char * ptr, * align_ptr;
+ int offset;
+
+ malloc_size_t alloc_size;
+
+ alloc_size = ALIGN_TO(s, CHUNK_ALIGN); /* size of aligned data load */
+ alloc_size += MALLOC_PADDING; /* padding */
+ alloc_size += CHUNK_OFFSET; /* size of chunk head */
+ alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);
+
+ if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
+ {
+ RERRNO = ENOMEM;
+ return NULL;
+ }
+
+ MALLOC_LOCK;
+
+ p = free_list;
+ r = p;
+
+ while (r)
+ {
+ int rem = r->size - alloc_size;
+ if (rem >= 0)
+ {
+ if (rem >= MALLOC_MINCHUNK)
+ {
+ /* Find a chunk that much larger than required size, break
+ * it into two chunks and return the second one */
+ r->size = rem;
+ r = (chunk *)((char *)r + rem);
+ r->size = alloc_size;
+ }
+ /* Find a chunk that is exactly the size or slightly bigger
+ * than requested size, just return this chunk */
+ else if (p == r)
+ {
+ /* Now it implies p==r==free_list. Move the free_list
+ * to next chunk */
+ free_list = r->next;
+ }
+ else
+ {
+ /* Normal case. Remove it from free_list */
+ p->next = r->next;
+ }
+ break;
+ }
+ p=r;
+ r=r->next;
+ }
+
+ /* Failed to find a appropriate chunk. Ask for more memory */
+ if (r == NULL)
+ {
+ r = sbrk_aligned(RCALL alloc_size);
+
+ /* sbrk returns -1 if fail to allocate */
+ if (r == (void *)-1)
+ {
+ RERRNO = ENOMEM;
+ MALLOC_UNLOCK;
+ return NULL;
+ }
+ r->size = alloc_size;
+ }
+ MALLOC_UNLOCK;
+
+ ptr = (char *)r + CHUNK_OFFSET;
+
+ align_ptr = (char *)ALIGN_TO((unsigned long)ptr, MALLOC_ALIGN);
+ offset = align_ptr - ptr;
+
+ if (offset)
+ {
+ *(int *)((char *)r + offset) = -offset;
+ }
+
+ assert(align_ptr + size <= (char *)r + alloc_size);
+ return align_ptr;
+}
+#endif /* DEFINE_MALLOC */
+
+#ifdef DEFINE_FREE
+#define MALLOC_CHECK_DOUBLE_FREE
+
+extern chunk * free_list;
+/** Function nano_free
+ * Implementation of libc free.
+ * Algorithm:
+ * Maintain a global free chunk single link list, headed by global
+ * variable free_list.
+ * When free, insert the to-be-freed chunk into free list. The place to
+ * insert should make sure all chunks are sorted by address from low to
+ * high. Then merge with neighbor chunks if adjacent.
+ */
+void nano_free (RARG void * free_p)
+{
+ chunk * p_to_free;
+ chunk * p, * q;
+
+ if (free_p == NULL) return;
+
+ p_to_free = get_chunk_from_ptr(free_p);
+
+ MALLOC_LOCK;
+ if (free_list == NULL)
+ {
+ /* Set first free list element */
+ p_to_free->next = free_list;
+ free_list = p_to_free;
+ MALLOC_UNLOCK;
+ return;
+ }
+
+ if (p_to_free < free_list)
+ {
+ if ((char *)p_to_free + p_to_free->size == (char *)free_list)
+ {
+ /* Chunk to free is just before the first element of
+ * free list */
+ p_to_free->size += free_list->size;
+ p_to_free->next = free_list->next;
+ }
+ else
+ {
+ /* Insert before current free_list */
+ p_to_free->next = free_list;
+ }
+ free_list = p_to_free;
+ MALLOC_UNLOCK;
+ return;
+ }
+
+ q = free_list;
+ /* Walk through the free list to find the place for insert. */
+ do
+ {
+ p = q;
+ q = q->next;
+ } while (q && q <= p_to_free);
+
+ /* Now p <= p_to_free and either q == NULL or q > p_to_free
+ * Try to merge with chunks immediately before/after it. */
+
+ if ((char *)p + p->size == (char *)p_to_free)
+ {
+ /* Chunk to be freed is adjacent
+ * to a free chunk before it */
+ p->size += p_to_free->size;
+ /* If the merged chunk is also adjacent
+ * to the chunk after it, merge again */
+ if ((char *)p + p->size == (char *) q)
+ {
+ p->size += q->size;
+ p->next = q->next;
+ }
+ }
+#ifdef MALLOC_CHECK_DOUBLE_FREE
+ else if ((char *)p + p->size > (char *)p_to_free)
+ {
+ /* Report double free fault */
+ RERRNO = ENOMEM;
+ MALLOC_UNLOCK;
+ return;
+ }
+#endif
+ else if ((char *)p_to_free + p_to_free->size == (char *) q)
+ {
+ /* Chunk to be freed is adjacent
+ * to a free chunk after it */
+ p_to_free->size += q->size;
+ p_to_free->next = q->next;
+ p->next = p_to_free;
+ }
+ else
+ {
+ /* Not adjacent to any chunk. Just insert it. Resulting
+ * a fragment. */
+ p_to_free->next = q;
+ p->next = p_to_free;
+ }
+ MALLOC_UNLOCK;
+}
+#endif /* DEFINE_FREE */
+
+#ifdef DEFINE_CFREE
+void nano_free (RARG void * free_p);
+
+void nano_cfree(RARG void * ptr)
+{
+ nano_free(RCALL ptr);
+}
+#endif /* DEFINE_CFREE */
+
+#ifdef DEFINE_CALLOC
+void * nano_malloc(RARG malloc_size_t s);
+
+/* Function nano_calloc
+ * Implement calloc simply by calling malloc and set zero */
+void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem)
+{
+ void * mem = nano_malloc(RCALL n * elem);
+ if (mem != NULL) memset(mem, 0, n * elem);
+ return mem;
+}
+#endif /* DEFINE_CALLOC */
+
+#ifdef DEFINE_REALLOC
+void * nano_malloc(RARG malloc_size_t s);
+void nano_free (RARG void * free_p);
+malloc_size_t nano_malloc_usable_size(RARG void * ptr);
+
+/* Function nano_realloc
+ * Implement realloc by malloc + memcpy */
+void * nano_realloc(RARG void * ptr, malloc_size_t size)
+{
+ void * mem;
+ chunk * p_to_realloc;
+
+ if (ptr == NULL) return nano_malloc(RCALL size);
+
+ if (size == 0)
+ {
+ nano_free(RCALL ptr);
+ return NULL;
+ }
+
+ /* TODO: There is chance to shrink the chunk if newly requested
+ * size is much small */
+ if (nano_malloc_usable_size(RCALL ptr) >= size)
+ return ptr;
+
+ mem = nano_malloc(RCALL size);
+ if (mem != NULL)
+ {
+ memcpy(mem, ptr, size);
+ nano_free(RCALL ptr);
+ }
+ return mem;
+}
+#endif /* DEFINE_REALLOC */
+
+#ifdef DEFINE_MALLINFO
+struct mallinfo
+{
+ int arena; /* total space allocated from system */
+ int ordblks; /* number of non-inuse chunks */
+ int smblks; /* unused -- always zero */
+ int hblks; /* number of mmapped regions */
+ int hblkhd; /* total space in mmapped regions */
+ int usmblks; /* unused -- always zero */
+ int fsmblks; /* unused -- always zero */
+ int uordblks; /* total allocated space */
+ int fordblks; /* total non-inuse space */
+ int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+static struct mallinfo current_mallinfo={0,0,0,0,0,0,0,0,0,0};
+
+struct mallinfo nano_mallinfo(RONEARG)
+{
+ return current_mallinfo;
+}
+
+#endif /* DEFINE_MALLINFO */
+
+#ifdef DEFINE_MALLOC_STATS
+void nano_malloc_stats(RONEARG)
+{
+}
+#endif /* DEFINE_MALLOC_STATS */
+
+#ifdef DEFINE_MALLOC_USABLE_SIZE
+malloc_size_t nano_malloc_usable_size(RARG void * ptr)
+{
+ chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
+ int size_or_offset = c->size;
+
+ if (size_or_offset < 0)
+ {
+ /* Padding is used. Excluding the padding size */
+ c = (chunk *)((char *)c + c->size);
+ return c->size - CHUNK_OFFSET + size_or_offset;
+ }
+ return c->size - CHUNK_OFFSET;
+}
+#endif /* DEFINE_MALLOC_USABLE_SIZE */
+
+#ifdef DEFINE_MEMALIGN
+void * nano_malloc(RARG malloc_size_t s);
+
+/* Function nano_memalign
+ * Allocate memory block aligned at specific boundary.
+ * align: required alignment. Must be power of 2. Return NULL
+ * if not power of 2. Undefined behavior is bigger than
+ * pointer value range.
+ * s: required size.
+ * Return: allocated memory pointer aligned to align
+ * Algorithm: Malloc a big enough block, padding pointer to aligned
+ * address, then truncate and free the tail if too big.
+ * Record the offset of align pointer and original pointer
+ * in the padding area.
+ */
+void * nano_memalign(RARG size_t align, size_t s)
+{
+ chunk * chunk_p;
+ malloc_size_t size_allocated, offset, ma_size, size_with_padding;
+ char * allocated, * aligned_p;
+
+ /* Return NULL if align isn't power of 2 */
+ if ((align & (align-1)) != 0) return NULL;
+
+ align = MAX(align, MALLOC_ALIGN);
+ ma_size = ALIGN_TO(MAX(s, MALLOC_MINSIZE), CHUNK_ALIGN);
+ size_with_padding = ma_size + align - MALLOC_ALIGN;
+
+ allocated = nano_malloc(RCALL size_with_padding);
+ if (allocated == NULL) return NULL;
+
+ chunk_p = get_chunk_from_ptr(allocated);
+ aligned_p = (char *)ALIGN_TO(
+ (unsigned long)((char *)chunk_p + CHUNK_OFFSET),
+ (unsigned long)align);
+ offset = aligned_p - ((char *)chunk_p + CHUNK_OFFSET);
+
+ if (offset)
+ {
+ if (offset >= MALLOC_MINCHUNK)
+ {
+ /* Padding is too large, free it */
+ chunk * front_chunk = chunk_p;
+ chunk_p = (chunk *)((char *)chunk_p + offset);
+ chunk_p->size = front_chunk->size - offset;
+ front_chunk->size = offset;
+ nano_free(RCALL (char *)front_chunk + CHUNK_OFFSET);
+ }
+ else
+ {
+ /* Padding is used. Need to set a jump offset for aligned pointer
+ * to get back to chunk head */
+ assert(offset >= sizeof(int));
+ *(int *)((char *)chunk_p + offset) = -offset;
+ }
+ }
+
+ size_allocated = chunk_p->size;
+ if ((char *)chunk_p + size_allocated >
+ (aligned_p + ma_size + MALLOC_MINCHUNK))
+ {
+ /* allocated much more than what's required for padding, free
+ * tail part */
+ chunk * tail_chunk = (chunk *)(aligned_p + ma_size);
+ chunk_p->size = aligned_p + ma_size - (char *)chunk_p;
+ tail_chunk->size = size_allocated - chunk_p->size;
+ nano_free(RCALL (char *)tail_chunk + CHUNK_OFFSET);
+ }
+ return aligned_p;
+}
+#endif /* DEFINE_MEMALIGN */
+
+#ifdef DEFINE_MALLOPT
+int nano_mallopt(RARG int parameter_number, int parameter_value)
+{
+ return 0;
+}
+#endif /* DEFINE_MALLOPT */
+
+#ifdef DEFINE_VALLOC
+void * nano_memalign(RARG size_t align, size_t s);
+
+void * nano_valloc(RARG size_t s)
+{
+ return nano_memalign(RCALL MALLOC_PAGE_ALIGN, s);
+}
+#endif /* DEFINE_VALLOC */
+
+#ifdef DEFINE_PVALLOC
+void * nano_valloc(RARG size_t s);
+
+void * nano_pvalloc(RARG size_t s)
+{
+ return nano_valloc(RCALL ALIGN_TO(s, MALLOC_PAGE_ALIGN));
+}
+#endif /* DEFINE_PVALLOC */
diff --git a/newlib/libc/stdlib/rand.c b/newlib/libc/stdlib/rand.c
index 131e5cf09..42acde4aa 100644
--- a/newlib/libc/stdlib/rand.c
+++ b/newlib/libc/stdlib/rand.c
@@ -72,20 +72,24 @@ on two different systems.
void
_DEFUN (srand, (seed), unsigned int seed)
{
- _REENT_CHECK_RAND48(_REENT);
- _REENT_RAND_NEXT(_REENT) = seed;
+ struct _reent *reent = _REENT;
+
+ _REENT_CHECK_RAND48(reent);
+ _REENT_RAND_NEXT(reent) = seed;
}
int
_DEFUN_VOID (rand)
{
+ struct _reent *reent = _REENT;
+
/* This multiplier was obtained from Knuth, D.E., "The Art of
Computer Programming," Vol 2, Seminumerical Algorithms, Third
Edition, Addison-Wesley, 1998, p. 106 (line 26) & p. 108 */
- _REENT_CHECK_RAND48(_REENT);
- _REENT_RAND_NEXT(_REENT) =
- _REENT_RAND_NEXT(_REENT) * __extension__ 6364136223846793005LL + 1;
- return (int)((_REENT_RAND_NEXT(_REENT) >> 32) & RAND_MAX);
+ _REENT_CHECK_RAND48(reent);
+ _REENT_RAND_NEXT(reent) =
+ _REENT_RAND_NEXT(reent) * __extension__ 6364136223846793005LL + 1;
+ return (int)((_REENT_RAND_NEXT(reent) >> 32) & RAND_MAX);
}
#endif /* _REENT_ONLY */
diff --git a/newlib/libc/stdlib/strtod.c b/newlib/libc/stdlib/strtod.c
index fe6aac206..159c9695b 100644
--- a/newlib/libc/stdlib/strtod.c
+++ b/newlib/libc/stdlib/strtod.c
@@ -128,11 +128,17 @@ THIS SOFTWARE.
#ifndef NO_IEEE_Scale
#define Avoid_Underflow
#undef tinytens
-/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* The factor of 2^106 in tinytens[4] helps us avoid setting the underflow */
/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
-static _CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
- 9007199254740992.e-256
- };
+static _CONST double tinytens[] = { 1e-16, 1e-32,
+#ifdef _DOUBLE_IS_32BITS
+ 0.0, 0.0, 0.0
+#else
+ 1e-64, 1e-128,
+ 9007199254740992. * 9007199254740992.e-256
+#endif
+ };
+
#endif
#endif
@@ -144,6 +150,28 @@ static _CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
#define Rounding Flt_Rounds
#endif
+#ifdef Avoid_Underflow /*{*/
+ static double
+_DEFUN (sulp, (x, scale),
+ U x _AND
+ int scale)
+{
+ U u;
+ double rv;
+ int i;
+
+ rv = ulp(dval(x));
+ if (!scale || (i = 2*P + 1 - ((dword0(x) & Exp_mask) >> Exp_shift)) <= 0)
+ return rv; /* Is there an example where i <= 0 ? */
+ dword0(u) = Exp_1 + (i << Exp_shift);
+#ifndef _DOUBLE_IS_32BITS
+ dword1(u) = 0;
+#endif
+ return rv * u.d;
+ }
+#endif /*}*/
+
+
#ifndef NO_HEX_FP
static void
@@ -221,7 +249,10 @@ _DEFUN (_strtod_r, (ptr, s00, se),
U aadj1, rv, rv0;
Long L;
__ULong y, z;
- _Bigint *bb, *bb1, *bd, *bd0, *bs, *delta;
+ _Bigint *bb = NULL, *bb1, *bd = NULL, *bd0, *bs = NULL, *delta = NULL;
+#ifdef Avoid_Underflow
+ __ULong Lsb, Lsb1;
+#endif
#ifdef SET_INEXACT
int inexact, oldinexact;
#endif
@@ -279,6 +310,8 @@ _DEFUN (_strtod_r, (ptr, s00, se),
switch((i = gethex(ptr, &s, &fpi1, &exp, &bb, sign)) & STRTOG_Retmask) {
case STRTOG_NoNumber:
s = s00;
+ sign = 0;
+ /* FALLTHROUGH */
case STRTOG_Zero:
break;
default:
@@ -299,14 +332,11 @@ _DEFUN (_strtod_r, (ptr, s00, se),
}
s0 = s;
y = z = 0;
- for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++) {
- if (nd < DBL_DIG + 1) {
- if (nd < 9)
- y = 10*y + c - '0';
- else
- z = 10*z + c - '0';
- }
- }
+ for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+ if (nd < 9)
+ y = 10*y + c - '0';
+ else
+ z = 10*z + c - '0';
nd0 = nd;
if (strncmp (s, _localeconv_r (ptr)->decimal_point,
strlen (_localeconv_r (ptr)->decimal_point)) == 0)
@@ -329,20 +359,15 @@ _DEFUN (_strtod_r, (ptr, s00, se),
nz++;
if (c -= '0') {
nf += nz;
- for(i = 1; i < nz; i++) {
- if (nd++ <= DBL_DIG + 1) {
- if (nd < 10)
- y *= 10;
- else
- z *= 10;
- }
- }
- if (nd++ <= DBL_DIG + 1) {
- if (nd < 10)
- y = 10*y + c;
- else
- z = 10*z + c;
- }
+ for(i = 1; i < nz; i++)
+ if (nd++ < 9)
+ y *= 10;
+ else if (nd <= DBL_DIG + 1)
+ z *= 10;
+ if (nd++ < 9)
+ y = 10*y + c;
+ else if (nd <= DBL_DIG + 1)
+ z = 10*z + c;
nz = 0;
}
}
@@ -691,12 +716,20 @@ _DEFUN (_strtod_r, (ptr, s00, se),
/* Put digits into bd: true value = bd * 10^e */
bd0 = s2b(ptr, s0, nd0, nd, y);
+ if (bd0 == NULL)
+ goto ovfl;
for(;;) {
bd = Balloc(ptr,bd0->_k);
+ if (bd == NULL)
+ goto ovfl;
Bcopy(bd, bd0);
bb = d2b(ptr,dval(rv), &bbe, &bbbits); /* rv = bb * 2^bbe */
+ if (bb == NULL)
+ goto ovfl;
bs = i2b(ptr,1);
+ if (bs == NULL)
+ goto ovfl;
if (e >= 0) {
bb2 = bb5 = 0;
@@ -716,12 +749,19 @@ _DEFUN (_strtod_r, (ptr, s00, se),
bs2++;
#endif
#ifdef Avoid_Underflow
+ Lsb = LSB;
+ Lsb1 = 0;
j = bbe - scale;
i = j + bbbits - 1; /* logb(rv) */
- if (i < Emin) /* denormal */
- j += P - Emin;
- else
- j = P + 1 - bbbits;
+ j = P + 1 - bbbits;
+ if (i < Emin) { /* denormal */
+ i = Emin - i;
+ j -= i;
+ if (i < 32)
+ Lsb <<= i;
+ else
+ Lsb1 = Lsb << (i-32);
+ }
#else /*Avoid_Underflow*/
#ifdef Sudden_Underflow
#ifdef IBM
@@ -753,19 +793,37 @@ _DEFUN (_strtod_r, (ptr, s00, se),
}
if (bb5 > 0) {
bs = pow5mult(ptr, bs, bb5);
+ if (bs == NULL)
+ goto ovfl;
bb1 = mult(ptr, bs, bb);
+ if (bb1 == NULL)
+ goto ovfl;
Bfree(ptr, bb);
bb = bb1;
}
- if (bb2 > 0)
+ if (bb2 > 0) {
bb = lshift(ptr, bb, bb2);
- if (bd5 > 0)
+ if (bb == NULL)
+ goto ovfl;
+ }
+ if (bd5 > 0) {
bd = pow5mult(ptr, bd, bd5);
- if (bd2 > 0)
+ if (bd == NULL)
+ goto ovfl;
+ }
+ if (bd2 > 0) {
bd = lshift(ptr, bd, bd2);
- if (bs2 > 0)
+ if (bd == NULL)
+ goto ovfl;
+ }
+ if (bs2 > 0) {
bs = lshift(ptr, bs, bs2);
+ if (bs == NULL)
+ goto ovfl;
+ }
delta = diff(ptr, bb, bd);
+ if (delta == NULL)
+ goto ovfl;
dsign = delta->_sign;
delta->_sign = 0;
i = cmp(delta, bs);
@@ -789,7 +847,7 @@ _DEFUN (_strtod_r, (ptr, s00, se),
else if (!dsign) {
adj = -1.;
if (!dword1(rv)
- && !(dword0(rv) & Frac_mask)) {
+ && !(dword0(rv) & Frac_mask)) {
y = dword0(rv) & Exp_mask;
#ifdef Avoid_Underflow
if (!scale || y > 2*P*Exp_msk1)
@@ -852,7 +910,9 @@ _DEFUN (_strtod_r, (ptr, s00, se),
#endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/
adj *= ulp(dval(rv));
- if (dsign)
+ if (dsign) {
+ if (dword0(rv) == Big0 && dword1(rv) == Big1)
+ goto ovfl;
dval(rv) += adj;
else
dval(rv) -= adj;
@@ -902,6 +962,8 @@ _DEFUN (_strtod_r, (ptr, s00, se),
#endif
0xffffffff)) {
/*boundary case -- increment exponent*/
+ if (dword0(rv) == Big0 && dword1(rv) == Big1)
+ goto ovfl;
dword0(rv) = (dword0(rv) & Exp_mask)
+ Exp_msk1
#ifdef IBM
@@ -960,14 +1022,31 @@ _DEFUN (_strtod_r, (ptr, s00, se),
#endif
}
#ifndef ROUND_BIASED
+#ifdef Avoid_Underflow
+ if (Lsb1) {
+ if (!(dword0(rv) & Lsb1))
+ break;
+ }
+ else if (!(dword1(rv) & Lsb))
+ break;
+#else
if (!(dword1(rv) & LSB))
break;
#endif
+#endif
if (dsign)
+#ifdef Avoid_Underflow
+ dval(rv) += sulp(rv, scale);
+#else
dval(rv) += ulp(dval(rv));
+#endif
#ifndef ROUND_BIASED
else {
+#ifdef Avoid_Underflow
+ dval(rv) -= sulp(rv, scale);
+#else
dval(rv) -= ulp(dval(rv));
+#endif
#ifndef Sudden_Underflow
if (!dval(rv))
goto undfl;
@@ -1044,7 +1123,7 @@ _DEFUN (_strtod_r, (ptr, s00, se),
#ifdef Avoid_Underflow
if (scale && y <= 2*P*Exp_msk1) {
if (aadj <= 0x7fffffff) {
- if ((z = aadj) <= 0)
+ if ((z = aadj) == 0)
z = 1;
aadj = z;
dval(aadj1) = dsign ? aadj : -aadj;
diff --git a/newlib/libc/stdlib/wcrtomb.c b/newlib/libc/stdlib/wcrtomb.c
index 60e0d89c8..b634c23ce 100644
--- a/newlib/libc/stdlib/wcrtomb.c
+++ b/newlib/libc/stdlib/wcrtomb.c
@@ -50,25 +50,26 @@ _DEFUN (wcrtomb, (s, wc, ps),
return _wcrtomb_r (_REENT, s, wc, ps);
#else
int retval = 0;
+ struct _reent *reent = _REENT;
char buf[10];
#ifdef _MB_CAPABLE
if (ps == NULL)
{
- _REENT_CHECK_MISC(_REENT);
- ps = &(_REENT_WCRTOMB_STATE(_REENT));
+ _REENT_CHECK_MISC(reent);
+ ps = &(_REENT_WCRTOMB_STATE(reent));
}
#endif
if (s == NULL)
- retval = __wctomb (_REENT, buf, L'\0', __locale_charset (), ps);
+ retval = __wctomb (reent, buf, L'\0', __locale_charset (), ps);
else
- retval = __wctomb (_REENT, s, wc, __locale_charset (), ps);
+ retval = __wctomb (reent, s, wc, __locale_charset (), ps);
if (retval == -1)
{
ps->__count = 0;
- _REENT->_errno = EILSEQ;
+ reent->_errno = EILSEQ;
return (size_t)(-1);
}
else
diff --git a/newlib/libc/stdlib/wctob.c b/newlib/libc/stdlib/wctob.c
index d97c01f24..eebaec84f 100644
--- a/newlib/libc/stdlib/wctob.c
+++ b/newlib/libc/stdlib/wctob.c
@@ -8,6 +8,7 @@
int
wctob (wint_t wc)
{
+ struct _reent *reent;
mbstate_t mbs;
unsigned char pmb[MB_LEN_MAX];
@@ -17,8 +18,9 @@ wctob (wint_t wc)
/* Put mbs in initial state. */
memset (&mbs, '\0', sizeof (mbs));
- _REENT_CHECK_MISC(_REENT);
+ reent = _REENT;
+ _REENT_CHECK_MISC(reent);
- return __wctomb (_REENT, (char *) pmb, wc, __locale_charset (), &mbs) == 1
+ return __wctomb (reent, (char *) pmb, wc, __locale_charset (), &mbs) == 1
? (int) pmb[0] : EOF;
}
diff --git a/newlib/libc/stdlib/wctomb.c b/newlib/libc/stdlib/wctomb.c
index 9e82eaaba..8d4ceb8ad 100644
--- a/newlib/libc/stdlib/wctomb.c
+++ b/newlib/libc/stdlib/wctomb.c
@@ -57,10 +57,12 @@ _DEFUN (wctomb, (s, wchar),
wchar_t wchar)
{
#ifdef _MB_CAPABLE
- _REENT_CHECK_MISC(_REENT);
+ struct _reent *reent = _REENT;
- return __wctomb (_REENT, s, wchar, __locale_charset (),
- &(_REENT_WCTOMB_STATE(_REENT)));
+ _REENT_CHECK_MISC(reent);
+
+ return __wctomb (reent, s, wchar, __locale_charset (),
+ &(_REENT_WCTOMB_STATE(reent)));
#else /* not _MB_CAPABLE */
if (s == NULL)
return 0;