Diffstat (limited to 'toolchain/uClibc/uClibc-0.9.28-500-mutex-cancel.patch')
-rw-r--r--   toolchain/uClibc/uClibc-0.9.28-500-mutex-cancel.patch   8631
1 file changed, 8631 insertions, 0 deletions
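
The diff below adds a single uClibc patch that replaces the ad-hoc LOCK/UNLOCK macro pairs scattered through libc/inet (getnetent, getproto, getservice, resolv, the RPC xid generator) and libc/misc/dirent with the centralized __UCLIBC_MUTEX_* macros from bits/uClibc_mutex.h, restructuring each locked function around a single exit label so the mutex is released on every path. As a hedged illustration only -- condensed from the getnetent.c hunk, with the surrounding file-scope state stubbed in and the macro internals (defined in the uClibc_mutex.h part of the patch, which is not shown in this excerpt) assumed to wrap pthread_mutex_lock/unlock with cancellation handling -- the converted idiom looks roughly like this:

/* Sketch of the locking idiom this patch introduces; not literal uClibc
 * source.  One lock, one unlock, and a single DONE: exit per function. */
#include <stdio.h>
#include <netdb.h>                      /* _PATH_NETWORKS, struct netent */
#include <bits/uClibc_mutex.h>          /* internal uClibc header providing the macros */

__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);

static FILE *netf;                      /* stand-ins for the file-scope state */
static struct netent net;               /* that getnetent.c already declares  */

struct netent *getnetent(void)
{
    struct netent *rv = NULL;           /* one result variable...             */

    __UCLIBC_MUTEX_LOCK(mylock);        /* ...so the lock is taken once...    */
    if (netf == NULL && (netf = fopen(_PATH_NETWORKS, "r")) == NULL)
        goto DONE;                      /* early returns become gotos         */

    /* ... parse one line of /etc/networks into 'net' (elided) ... */
    rv = &net;

 DONE:
    __UCLIBC_MUTEX_UNLOCK(mylock);      /* ...and released on every path      */
    return rv;
}

The same mechanical transformation is applied to every function in the patch; where a non-trivial return value is involved (getprotoent_r, getservent_r, __open_nameservers, gethostent_r), an rv/ret variable is introduced so the unlock still happens exactly once before returning.
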
diff --git a/toolchain/uClibc/uClibc-0.9.28-500-mutex-cancel.patch b/toolchain/uClibc/uClibc-0.9.28-500-mutex-cancel.patch
new file mode 100644
index 000000000..5e56a7397
--- /dev/null
+++ b/toolchain/uClibc/uClibc-0.9.28-500-mutex-cancel.patch
@@ -0,0 +1,8631 @@
+diff --git a/include/printf.h b/include/printf.h
+index 340b6cb..2dea58f 100644
+--- a/include/printf.h
++++ b/include/printf.h
+@@ -75,6 +75,7 @@ struct printf_info
+ unsigned int is_short:1; /* h flag. */
+ unsigned int is_long:1; /* l flag. */
+ unsigned int is_long_double:1;/* L flag. */
++ unsigned int __padding:20;/* non-gnu -- total of 32 bits on 32bit arch */
+
+ #elif __BYTE_ORDER == __BIG_ENDIAN
+
+diff --git a/include/pthread.h b/include/pthread.h
+index 8c01172..cee112b 100644
+--- a/include/pthread.h
++++ b/include/pthread.h
+@@ -644,7 +644,8 @@ extern void _pthread_cleanup_pop (struct
+ /* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and set it to deferred cancellation. */
+
+-#ifdef __USE_GNU
++/* #ifdef __USE_GNU */
++#if defined(__USE_GNU) || defined(_LIBC)
+ # define pthread_cleanup_push_defer_np(routine,arg) \
+ { struct _pthread_cleanup_buffer _buffer; \
+ _pthread_cleanup_push_defer (&_buffer, (routine), (arg));
+diff --git a/libc/inet/getnetent.c b/libc/inet/getnetent.c
+index 181c5ad..659bf5d 100644
+--- a/libc/inet/getnetent.c
++++ b/libc/inet/getnetent.c
+@@ -22,18 +22,9 @@
+ #include <netdb.h>
+ #include <arpa/inet.h>
+
++#include <bits/uClibc_mutex.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
+-
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ #define MAXALIASES 35
+ static const char NETDB[] = _PATH_NETWORKS;
+@@ -46,25 +37,25 @@ int _net_stayopen;
+
+ void setnetent(int f)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (netf == NULL)
+- netf = fopen(NETDB, "r" );
++ netf = fopen(NETDB, "r" );
+ else
+- rewind(netf);
++ rewind(netf);
+ _net_stayopen |= f;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return;
+ }
+
+ void endnetent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (netf) {
+- fclose(netf);
+- netf = NULL;
++ fclose(netf);
++ netf = NULL;
+ }
+ _net_stayopen = 0;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ static char * any(register char *cp, char *match)
+@@ -72,10 +63,10 @@ static char * any(register char *cp, cha
+ register char *mp, c;
+
+ while ((c = *cp)) {
+- for (mp = match; *mp; mp++)
+- if (*mp == c)
+- return (cp);
+- cp++;
++ for (mp = match; *mp; mp++)
++ if (*mp == c)
++ return (cp);
++ cp++;
+ }
+ return ((char *)0);
+ }
+@@ -84,59 +75,62 @@ struct netent * getnetent(void)
+ {
+ char *p;
+ register char *cp, **q;
++ struct netent *rv = NULL;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (netf == NULL && (netf = fopen(NETDB, "r" )) == NULL) {
+- UNLOCK;
+- return (NULL);
++ goto DONE;
+ }
+-again:
++ again:
+
+ if (!line) {
+- line = malloc(BUFSIZ + 1);
+- if (!line)
+- abort();
++ line = malloc(BUFSIZ + 1);
++ if (!line)
++ abort();
+ }
+
+ p = fgets(line, BUFSIZ, netf);
+ if (p == NULL) {
+- UNLOCK;
+- return (NULL);
++ goto DONE;
+ }
+ if (*p == '#')
+- goto again;
++ goto again;
+ cp = any(p, "#\n");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp = '\0';
+ net.n_name = p;
+ cp = any(p, " \t");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp++ = '\0';
+ while (*cp == ' ' || *cp == '\t')
+- cp++;
++ cp++;
+ p = any(cp, " \t");
+ if (p != NULL)
+- *p++ = '\0';
++ *p++ = '\0';
+ net.n_net = inet_network(cp);
+ net.n_addrtype = AF_INET;
+ q = net.n_aliases = net_aliases;
+ if (p != NULL)
+- cp = p;
++ cp = p;
+ while (cp && *cp) {
+- if (*cp == ' ' || *cp == '\t') {
+- cp++;
+- continue;
+- }
+- if (q < &net_aliases[MAXALIASES - 1])
+- *q++ = cp;
+- cp = any(cp, " \t");
+- if (cp != NULL)
+- *cp++ = '\0';
++ if (*cp == ' ' || *cp == '\t') {
++ cp++;
++ continue;
++ }
++ if (q < &net_aliases[MAXALIASES - 1])
++ *q++ = cp;
++ cp = any(cp, " \t");
++ if (cp != NULL)
++ *cp++ = '\0';
+ }
+ *q = NULL;
+- UNLOCK;
+- return (&net);
++
++ rv = &net;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
++ return rv;
+ }
+
+diff --git a/libc/inet/getproto.c b/libc/inet/getproto.c
+index c9f35f1..3665d89 100644
+--- a/libc/inet/getproto.c
++++ b/libc/inet/getproto.c
+@@ -62,17 +62,9 @@
+ #include <string.h>
+ #include <errno.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
++#include <bits/uClibc_mutex.h>
+
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ #define MAXALIASES 35
+ #define SBUFSIZE (BUFSIZ + 1 + (sizeof(char *) * MAXALIASES))
+@@ -85,109 +77,114 @@ static int proto_stayopen;
+ static void __initbuf(void)
+ {
+ if (!static_aliases) {
+- static_aliases = malloc(SBUFSIZE);
+- if (!static_aliases)
+- abort();
++ static_aliases = malloc(SBUFSIZE);
++ if (!static_aliases)
++ abort();
+ }
+ }
+
+ void setprotoent(int f)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (protof == NULL)
+- protof = fopen(_PATH_PROTOCOLS, "r" );
++ protof = fopen(_PATH_PROTOCOLS, "r" );
+ else
+- rewind(protof);
++ rewind(protof);
+ proto_stayopen |= f;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void endprotoent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (protof) {
+- fclose(protof);
+- protof = NULL;
++ fclose(protof);
++ protof = NULL;
+ }
+ proto_stayopen = 0;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ int getprotoent_r(struct protoent *result_buf,
+- char *buf, size_t buflen,
+- struct protoent **result)
++ char *buf, size_t buflen,
++ struct protoent **result)
+ {
+ char *p;
+ register char *cp, **q;
+ char **proto_aliases;
+ char *line;
++ int rv;
+
+ *result = NULL;
+
+ if (buflen < sizeof(*proto_aliases)*MAXALIASES) {
+- errno=ERANGE;
+- return errno;
++ errno=ERANGE;
++ return errno;
+ }
+- LOCK;
++
++ __UCLIBC_MUTEX_LOCK(mylock);
+ proto_aliases=(char **)buf;
+ buf+=sizeof(*proto_aliases)*MAXALIASES;
+ buflen-=sizeof(*proto_aliases)*MAXALIASES;
+
+ if (buflen < BUFSIZ+1) {
+- UNLOCK;
+- errno=ERANGE;
+- return errno;
++ errno=rv=ERANGE;
++ goto DONE;
+ }
+ line=buf;
+ buf+=BUFSIZ+1;
+ buflen-=BUFSIZ+1;
+
+ if (protof == NULL && (protof = fopen(_PATH_PROTOCOLS, "r" )) == NULL) {
+- UNLOCK;
+- return errno;
++ rv=errno;
++ goto DONE;
+ }
+-again:
++ again:
+ if ((p = fgets(line, BUFSIZ, protof)) == NULL) {
+- UNLOCK;
+- return TRY_AGAIN;
++ rv=TRY_AGAIN;
++ goto DONE;
+ }
+
+ if (*p == '#')
+- goto again;
++ goto again;
+ cp = strpbrk(p, "#\n");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp = '\0';
+ result_buf->p_name = p;
+ cp = strpbrk(p, " \t");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp++ = '\0';
+ while (*cp == ' ' || *cp == '\t')
+- cp++;
++ cp++;
+ p = strpbrk(cp, " \t");
+ if (p != NULL)
+- *p++ = '\0';
++ *p++ = '\0';
+ result_buf->p_proto = atoi(cp);
+ q = result_buf->p_aliases = proto_aliases;
+ if (p != NULL) {
+- cp = p;
+- while (cp && *cp) {
+- if (*cp == ' ' || *cp == '\t') {
+- cp++;
+- continue;
+- }
+- if (q < &proto_aliases[MAXALIASES - 1])
+- *q++ = cp;
+- cp = strpbrk(cp, " \t");
+- if (cp != NULL)
+- *cp++ = '\0';
+- }
++ cp = p;
++ while (cp && *cp) {
++ if (*cp == ' ' || *cp == '\t') {
++ cp++;
++ continue;
++ }
++ if (q < &proto_aliases[MAXALIASES - 1])
++ *q++ = cp;
++ cp = strpbrk(cp, " \t");
++ if (cp != NULL)
++ *cp++ = '\0';
++ }
+ }
+ *q = NULL;
+ *result=result_buf;
+- UNLOCK;
+- return 0;
++
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
++ return rv;
+ }
+
+ struct protoent * getprotoent(void)
+@@ -201,26 +198,26 @@ struct protoent * getprotoent(void)
+
+
+ int getprotobyname_r(const char *name,
+- struct protoent *result_buf,
+- char *buf, size_t buflen,
+- struct protoent **result)
++ struct protoent *result_buf,
++ char *buf, size_t buflen,
++ struct protoent **result)
+ {
+ register char **cp;
+ int ret;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ setprotoent(proto_stayopen);
+ while (!(ret=getprotoent_r(result_buf, buf, buflen, result))) {
+- if (strcmp(result_buf->p_name, name) == 0)
+- break;
+- for (cp = result_buf->p_aliases; *cp != 0; cp++)
+- if (strcmp(*cp, name) == 0)
+- goto found;
++ if (strcmp(result_buf->p_name, name) == 0)
++ break;
++ for (cp = result_buf->p_aliases; *cp != 0; cp++)
++ if (strcmp(*cp, name) == 0)
++ goto found;
+ }
+-found:
++ found:
+ if (!proto_stayopen)
+- endprotoent();
+- UNLOCK;
++ endprotoent();
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return *result?0:ret;
+ }
+
+@@ -236,20 +233,20 @@ struct protoent * getprotobyname(const c
+
+
+ int getprotobynumber_r (int proto_num,
+- struct protoent *result_buf,
+- char *buf, size_t buflen,
+- struct protoent **result)
++ struct protoent *result_buf,
++ char *buf, size_t buflen,
++ struct protoent **result)
+ {
+ int ret;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ setprotoent(proto_stayopen);
+ while (!(ret=getprotoent_r(result_buf, buf, buflen, result)))
+- if (result_buf->p_proto == proto_num)
+- break;
++ if (result_buf->p_proto == proto_num)
++ break;
+ if (!proto_stayopen)
+- endprotoent();
+- UNLOCK;
++ endprotoent();
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return *result?0:ret;
+ }
+
+diff --git a/libc/inet/getservice.c b/libc/inet/getservice.c
+index cbe5c50..b666057 100644
+--- a/libc/inet/getservice.c
++++ b/libc/inet/getservice.c
+@@ -65,20 +65,9 @@
+ #include <arpa/inet.h>
+ #include <errno.h>
+
++#include <bits/uClibc_mutex.h>
+
+-
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
+-
+-
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ #define MAXALIASES 35
+ #define SBUFSIZE (BUFSIZ + 1 + (sizeof(char *) * MAXALIASES))
+@@ -91,32 +80,32 @@ static int serv_stayopen;
+ static void __initbuf(void)
+ {
+ if (!servbuf) {
+- servbuf = malloc(SBUFSIZE);
+- if (!servbuf)
+- abort();
++ servbuf = malloc(SBUFSIZE);
++ if (!servbuf)
++ abort();
+ }
+ }
+
+ void setservent(int f)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (servf == NULL)
+- servf = fopen(_PATH_SERVICES, "r" );
++ servf = fopen(_PATH_SERVICES, "r" );
+ else
+- rewind(servf);
++ rewind(servf);
+ serv_stayopen |= f;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void endservent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (servf) {
+- fclose(servf);
+- servf = NULL;
++ fclose(servf);
++ servf = NULL;
+ }
+ serv_stayopen = 0;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ struct servent * getservent(void)
+@@ -149,127 +138,129 @@ struct servent * getservbyport(int port,
+ }
+
+ int getservent_r(struct servent * result_buf,
+- char * buf, size_t buflen,
+- struct servent ** result)
++ char * buf, size_t buflen,
++ struct servent ** result)
+ {
+ char *p;
+ register char *cp, **q;
+ char **serv_aliases;
+ char *line;
++ int rv;
+
+ *result=NULL;
+
+ if (buflen < sizeof(*serv_aliases)*MAXALIASES) {
+- errno=ERANGE;
+- return errno;
++ errno=ERANGE;
++ return errno;
+ }
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ serv_aliases=(char **)buf;
+ buf+=sizeof(*serv_aliases)*MAXALIASES;
+ buflen-=sizeof(*serv_aliases)*MAXALIASES;
+
+ if (buflen < BUFSIZ+1) {
+- UNLOCK;
+- errno=ERANGE;
+- return errno;
++ errno=rv=ERANGE;
++ goto DONE;
+ }
+ line=buf;
+ buf+=BUFSIZ+1;
+ buflen-=BUFSIZ+1;
+
+ if (servf == NULL && (servf = fopen(_PATH_SERVICES, "r" )) == NULL) {
+- UNLOCK;
+- errno=EIO;
+- return errno;
++ errno=rv=EIO;
++ goto DONE;
+ }
+-again:
++ again:
+ if ((p = fgets(line, BUFSIZ, servf)) == NULL) {
+- UNLOCK;
+- errno=EIO;
+- return errno;
++ errno=rv=EIO;
++ goto DONE;
+ }
+ if (*p == '#')
+- goto again;
++ goto again;
+ cp = strpbrk(p, "#\n");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp = '\0';
+ result_buf->s_name = p;
+ p = strpbrk(p, " \t");
+ if (p == NULL)
+- goto again;
++ goto again;
+ *p++ = '\0';
+ while (*p == ' ' || *p == '\t')
+- p++;
++ p++;
+ cp = strpbrk(p, ",/");
+ if (cp == NULL)
+- goto again;
++ goto again;
+ *cp++ = '\0';
+ result_buf->s_port = htons((u_short)atoi(p));
+ result_buf->s_proto = cp;
+ q = result_buf->s_aliases = serv_aliases;
+ cp = strpbrk(cp, " \t");
+ if (cp != NULL)
+- *cp++ = '\0';
++ *cp++ = '\0';
+ while (cp && *cp) {
+- if (*cp == ' ' || *cp == '\t') {
+- cp++;
+- continue;
+- }
+- if (q < &serv_aliases[MAXALIASES - 1])
+- *q++ = cp;
+- cp = strpbrk(cp, " \t");
+- if (cp != NULL)
+- *cp++ = '\0';
++ if (*cp == ' ' || *cp == '\t') {
++ cp++;
++ continue;
++ }
++ if (q < &serv_aliases[MAXALIASES - 1])
++ *q++ = cp;
++ cp = strpbrk(cp, " \t");
++ if (cp != NULL)
++ *cp++ = '\0';
+ }
+ *q = NULL;
+ *result=result_buf;
+- UNLOCK;
+- return 0;
++
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
++ return rv;
+ }
+
+ int getservbyname_r(const char *name, const char *proto,
+- struct servent * result_buf, char * buf, size_t buflen,
+- struct servent ** result)
++ struct servent * result_buf, char * buf, size_t buflen,
++ struct servent ** result)
+ {
+ register char **cp;
+ int ret;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ setservent(serv_stayopen);
+ while (!(ret=getservent_r(result_buf, buf, buflen, result))) {
+- if (strcmp(name, result_buf->s_name) == 0)
+- goto gotname;
+- for (cp = result_buf->s_aliases; *cp; cp++)
+- if (strcmp(name, *cp) == 0)
+- goto gotname;
+- continue;
+-gotname:
+- if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
+- break;
++ if (strcmp(name, result_buf->s_name) == 0)
++ goto gotname;
++ for (cp = result_buf->s_aliases; *cp; cp++)
++ if (strcmp(name, *cp) == 0)
++ goto gotname;
++ continue;
++ gotname:
++ if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
++ break;
+ }
+ if (!serv_stayopen)
+- endservent();
+- UNLOCK;
++ endservent();
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return *result?0:ret;
+ }
+
+ int getservbyport_r(int port, const char *proto,
+- struct servent * result_buf, char * buf,
+- size_t buflen, struct servent ** result)
++ struct servent * result_buf, char * buf,
++ size_t buflen, struct servent ** result)
+ {
+ int ret;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ setservent(serv_stayopen);
+ while (!(ret=getservent_r(result_buf, buf, buflen, result))) {
+- if (result_buf->s_port != port)
+- continue;
+- if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
+- break;
++ if (result_buf->s_port != port)
++ continue;
++ if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
++ break;
+ }
+ if (!serv_stayopen)
+- endservent();
+- UNLOCK;
++ endservent();
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return *result?0:ret;
+ }
+diff --git a/libc/inet/resolv.c b/libc/inet/resolv.c
+index 27b60ef..0f583ab 100644
+--- a/libc/inet/resolv.c
++++ b/libc/inet/resolv.c
+@@ -7,7 +7,7 @@
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+-*/
++ */
+
+ /*
+ * Portions Copyright (c) 1985, 1993
+@@ -153,6 +153,11 @@
+ #include <sys/utsname.h>
+ #include <sys/un.h>
+
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_EXTERN(__resolv_lock);
++
++
+ #define MAX_RECURSE 5
+ #define REPLY_TIMEOUT 10
+ #define MAX_RETRIES 3
+@@ -180,18 +185,6 @@ extern char * __nameserver[MAX_SERVERS];
+ extern int __searchdomains;
+ extern char * __searchdomain[MAX_SEARCH];
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-extern pthread_mutex_t __resolv_lock;
+-# define BIGLOCK __pthread_mutex_lock(&__resolv_lock)
+-# define BIGUNLOCK __pthread_mutex_unlock(&__resolv_lock);
+-#else
+-# define BIGLOCK
+-# define BIGUNLOCK
+-#endif
+-
+-
+-
+ /* Structs */
+ struct resolv_header {
+ int id;
+@@ -229,49 +222,49 @@ enum etc_hosts_action {
+
+ /* function prototypes */
+ extern int __get_hosts_byname_r(const char * name, int type,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop);
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop);
+ extern int __get_hosts_byaddr_r(const char * addr, int len, int type,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop);
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop);
+ extern void __open_etc_hosts(FILE **fp);
+ extern int __read_etc_hosts_r(FILE *fp, const char * name, int type,
+- enum etc_hosts_action action,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop);
++ enum etc_hosts_action action,
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop);
+ extern int __dns_lookup(const char * name, int type, int nscount,
+- char ** nsip, unsigned char ** outpacket, struct resolv_answer * a);
++ char ** nsip, unsigned char ** outpacket, struct resolv_answer * a);
+
+ extern int __encode_dotted(const char * dotted, unsigned char * dest, int maxlen);
+ extern int __decode_dotted(const unsigned char * message, int offset,
+- char * dest, int maxlen);
++ char * dest, int maxlen);
+ extern int __length_dotted(const unsigned char * message, int offset);
+ extern int __encode_header(struct resolv_header * h, unsigned char * dest, int maxlen);
+ extern int __decode_header(unsigned char * data, struct resolv_header * h);
+ extern int __encode_question(struct resolv_question * q,
+- unsigned char * dest, int maxlen);
++ unsigned char * dest, int maxlen);
+ extern int __decode_question(unsigned char * message, int offset,
+- struct resolv_question * q);
++ struct resolv_question * q);
+ extern int __encode_answer(struct resolv_answer * a,
+- unsigned char * dest, int maxlen);
++ unsigned char * dest, int maxlen);
+ extern int __decode_answer(unsigned char * message, int offset,
+- struct resolv_answer * a);
++ struct resolv_answer * a);
+ extern int __length_question(unsigned char * message, int offset);
+ extern int __open_nameservers(void);
+ extern void __close_nameservers(void);
+ extern int __dn_expand(const u_char *, const u_char *, const u_char *,
+- char *, int);
++ char *, int);
+ extern int __ns_name_uncompress(const u_char *, const u_char *,
+- const u_char *, char *, size_t);
++ const u_char *, char *, size_t);
+ extern int __ns_name_ntop(const u_char *, char *, size_t);
+ extern int __ns_name_unpack(const u_char *, const u_char *, const u_char *,
+- u_char *, size_t);
++ u_char *, size_t);
+
+
+ #ifdef L_encodeh
+@@ -361,7 +354,7 @@ int __encode_dotted(const char *dotted,
+ This routine understands compressed data. */
+
+ int __decode_dotted(const unsigned char *data, int offset,
+- char *dest, int maxlen)
++ char *dest, int maxlen)
+ {
+ int l;
+ int measure = 1;
+@@ -435,7 +428,7 @@ int __length_dotted(const unsigned char
+
+ #ifdef L_encodeq
+ int __encode_question(struct resolv_question *q,
+- unsigned char *dest, int maxlen)
++ unsigned char *dest, int maxlen)
+ {
+ int i;
+
+@@ -460,7 +453,7 @@ int __encode_question(struct resolv_ques
+
+ #ifdef L_decodeq
+ int __decode_question(unsigned char *message, int offset,
+- struct resolv_question *q)
++ struct resolv_question *q)
+ {
+ char temp[256];
+ int i;
+@@ -525,7 +518,7 @@ int __encode_answer(struct resolv_answer
+
+ #ifdef L_decodea
+ int __decode_answer(unsigned char *message, int offset,
+- struct resolv_answer *a)
++ struct resolv_answer *a)
+ {
+ char temp[256];
+ int i;
+@@ -557,11 +550,11 @@ int __decode_answer(unsigned char *messa
+
+ #ifdef L_encodep
+ int __encode_packet(struct resolv_header *h,
+- struct resolv_question **q,
+- struct resolv_answer **an,
+- struct resolv_answer **ns,
+- struct resolv_answer **ar,
+- unsigned char *dest, int maxlen)
++ struct resolv_question **q,
++ struct resolv_answer **an,
++ struct resolv_answer **ns,
++ struct resolv_answer **ar,
++ unsigned char *dest, int maxlen)
+ {
+ int i, total = 0;
+ int j;
+@@ -621,7 +614,7 @@ int __decode_packet(unsigned char *data,
+
+ #ifdef L_formquery
+ int __form_query(int id, const char *name, int type, unsigned char *packet,
+- int maxlen)
++ int maxlen)
+ {
+ struct resolv_header h;
+ struct resolv_question q;
+@@ -649,14 +642,7 @@ int __form_query(int id, const char *nam
+
+ #ifdef L_dnslookup
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ /* Just for the record, having to lock __dns_lookup() just for these two globals
+ * is pretty lame. I think these two variables can probably be de-global-ized,
+@@ -665,7 +651,7 @@ static pthread_mutex_t mylock = PTHREAD_
+ static int ns=0, id=1;
+
+ int __dns_lookup(const char *name, int type, int nscount, char **nsip,
+- unsigned char **outpacket, struct resolv_answer *a)
++ unsigned char **outpacket, struct resolv_answer *a)
+ {
+ int i, j, len, fd, pos, rc;
+ struct timeval tv;
+@@ -693,10 +679,10 @@ int __dns_lookup(const char *name, int t
+ DPRINTF("Looking up type %d answer for '%s'\n", type, name);
+
+ /* Mess with globals while under lock */
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ local_ns = ns % nscount;
+ local_id = id;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ while (retries < MAX_RETRIES) {
+ if (fd != -1)
+@@ -722,13 +708,13 @@ int __dns_lookup(const char *name, int t
+
+ strncpy(lookup,name,MAXDNAME);
+ if (variant >= 0) {
+- BIGLOCK;
+- if (variant < __searchdomains) {
+- strncat(lookup,".", MAXDNAME);
+- strncat(lookup,__searchdomain[variant], MAXDNAME);
+- }
+- BIGUNLOCK;
+- }
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
++ if (variant < __searchdomains) {
++ strncat(lookup,".", MAXDNAME);
++ strncat(lookup,__searchdomain[variant], MAXDNAME);
++ }
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
++ }
+ DPRINTF("lookup name: %s\n", lookup);
+ q.dotted = (char *)lookup;
+ q.qtype = type;
+@@ -750,7 +736,7 @@ int __dns_lookup(const char *name, int t
+ fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ #endif
+ if (fd < 0) {
+- retries++;
++ retries++;
+ continue;
+ }
+
+@@ -772,11 +758,11 @@ int __dns_lookup(const char *name, int t
+ #endif
+ if (rc < 0) {
+ if (errno == ENETUNREACH) {
+- /* routing error, presume not transient */
+- goto tryall;
++ /* routing error, presume not transient */
++ goto tryall;
+ } else
+- /* retry */
+- retries++;
++ /* retry */
++ retries++;
+ continue;
+ }
+
+@@ -838,55 +824,55 @@ int __dns_lookup(const char *name, int t
+
+ first_answer = 1;
+ for (j=0;j<h.ancount;j++,pos += i)
+- {
+- i = __decode_answer(packet, pos, &ma);
++ {
++ i = __decode_answer(packet, pos, &ma);
+
+- if (i<0) {
+- DPRINTF("failed decode %d\n", i);
+- goto again;
+- }
++ if (i<0) {
++ DPRINTF("failed decode %d\n", i);
++ goto again;
++ }
+
+- if ( first_answer )
+- {
+- ma.buf = a->buf;
+- ma.buflen = a->buflen;
+- ma.add_count = a->add_count;
+- memcpy(a, &ma, sizeof(ma));
+- if (a->atype != T_SIG && (0 == a->buf || (type != T_A && type != T_AAAA)))
+- {
+- break;
+- }
+- if (a->atype != type)
+- {
+- free(a->dotted);
+- continue;
+- }
+- a->add_count = h.ancount - j - 1;
+- if ((a->rdlength + sizeof(struct in_addr*)) * a->add_count > a->buflen)
+- {
+- break;
+- }
+- a->add_count = 0;
+- first_answer = 0;
+- }
+- else
+- {
+- free(ma.dotted);
+- if (ma.atype != type)
+- {
+- continue;
+- }
+- if (a->rdlength != ma.rdlength)
+- {
+- free(a->dotted);
+- DPRINTF("Answer address len(%u) differs from original(%u)\n",
+- ma.rdlength, a->rdlength);
+- goto again;
++ if ( first_answer )
++ {
++ ma.buf = a->buf;
++ ma.buflen = a->buflen;
++ ma.add_count = a->add_count;
++ memcpy(a, &ma, sizeof(ma));
++ if (a->atype != T_SIG && (0 == a->buf || (type != T_A && type != T_AAAA)))
++ {
++ break;
++ }
++ if (a->atype != type)
++ {
++ free(a->dotted);
++ continue;
++ }
++ a->add_count = h.ancount - j - 1;
++ if ((a->rdlength + sizeof(struct in_addr*)) * a->add_count > a->buflen)
++ {
++ break;
++ }
++ a->add_count = 0;
++ first_answer = 0;
++ }
++ else
++ {
++ free(ma.dotted);
++ if (ma.atype != type)
++ {
++ continue;
++ }
++ if (a->rdlength != ma.rdlength)
++ {
++ free(a->dotted);
++ DPRINTF("Answer address len(%u) differs from original(%u)\n",
++ ma.rdlength, a->rdlength);
++ goto again;
++ }
++ memcpy(a->buf + (a->add_count * ma.rdlength), ma.rdata, ma.rdlength);
++ ++a->add_count;
++ }
+ }
+- memcpy(a->buf + (a->add_count * ma.rdlength), ma.rdata, ma.rdlength);
+- ++a->add_count;
+- }
+- }
+
+ DPRINTF("Answer name = |%s|\n", a->dotted);
+ DPRINTF("Answer type = |%d|\n", a->atype);
+@@ -900,48 +886,48 @@ int __dns_lookup(const char *name, int t
+ free(lookup);
+
+ /* Mess with globals while under lock */
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ ns = local_ns;
+ id = local_id;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ return (len); /* success! */
+
+- tryall:
++ tryall:
+ /* if there are other nameservers, give them a go,
+ otherwise return with error */
+ {
+ variant = -1;
+- local_ns = (local_ns + 1) % nscount;
+- if (local_ns == 0)
+- retries++;
++ local_ns = (local_ns + 1) % nscount;
++ if (local_ns == 0)
++ retries++;
+
+- continue;
++ continue;
+ }
+
+- again:
++ again:
+ /* if there are searchdomains, try them or fallback as passed */
+ {
+ int sdomains;
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ sdomains=__searchdomains;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+
+ if (variant < sdomains - 1) {
+- /* next search */
+- variant++;
++ /* next search */
++ variant++;
+ } else {
+- /* next server, first search */
+- local_ns = (local_ns + 1) % nscount;
+- if (local_ns == 0)
+- retries++;
++ /* next server, first search */
++ local_ns = (local_ns + 1) % nscount;
++ if (local_ns == 0)
++ retries++;
+
+- variant = -1;
++ variant = -1;
+ }
+ }
+ }
+
+-fail:
++ fail:
+ if (fd != -1)
+ close(fd);
+ if (lookup)
+@@ -951,10 +937,10 @@ fail:
+ h_errno = NETDB_INTERNAL;
+ /* Mess with globals while under lock */
+ if (local_ns != -1) {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ ns = local_ns;
+ id = local_id;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+ return -1;
+ }
+@@ -966,9 +952,8 @@ int __nameservers;
+ char * __nameserver[MAX_SERVERS];
+ int __searchdomains;
+ char * __searchdomain[MAX_SEARCH];
+-#ifdef __UCLIBC_HAS_THREADS__
+-pthread_mutex_t __resolv_lock = PTHREAD_MUTEX_INITIALIZER;
+-#endif
++
++__UCLIBC_MUTEX_INIT(__resolv_lock, PTHREAD_MUTEX_INITIALIZER);
+
+ /*
+ * we currently read formats not quite the same as that on normal
+@@ -982,60 +967,63 @@ int __open_nameservers()
+ #define RESOLV_ARGS 5
+ char szBuffer[128], *p, *argv[RESOLV_ARGS];
+ int argc;
++ int rv = 0;
+
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ if (__nameservers > 0) {
+- BIGUNLOCK;
+- return 0;
++ goto DONE;
+ }
+
+ if ((fp = fopen("/etc/resolv.conf", "r")) ||
+- (fp = fopen("/etc/config/resolv.conf", "r")))
+- {
+-
+- while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) {
++ (fp = fopen("/etc/config/resolv.conf", "r")))
++ {
+
+- for (p = szBuffer; *p && isspace(*p); p++)
+- /* skip white space */;
+- if (*p == '\0' || *p == '\n' || *p == '#') /* skip comments etc */
+- continue;
+- argc = 0;
+- while (*p && argc < RESOLV_ARGS) {
+- argv[argc++] = p;
+- while (*p && !isspace(*p) && *p != '\n')
+- p++;
+- while (*p && (isspace(*p) || *p == '\n')) /* remove spaces */
+- *p++ = '\0';
+- }
++ while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) {
+
+- if (strcmp(argv[0], "nameserver") == 0) {
+- for (i = 1; i < argc && __nameservers < MAX_SERVERS; i++) {
+- __nameserver[__nameservers++] = strdup(argv[i]);
+- DPRINTF("adding nameserver %s\n", argv[i]);
++ for (p = szBuffer; *p && isspace(*p); p++)
++ /* skip white space */;
++ if (*p == '\0' || *p == '\n' || *p == '#') /* skip comments etc */
++ continue;
++ argc = 0;
++ while (*p && argc < RESOLV_ARGS) {
++ argv[argc++] = p;
++ while (*p && !isspace(*p) && *p != '\n')
++ p++;
++ while (*p && (isspace(*p) || *p == '\n')) /* remove spaces */
++ *p++ = '\0';
+ }
+- }
+
+- /* domain and search are mutually exclusive, the last one wins */
+- if (strcmp(argv[0],"domain")==0 || strcmp(argv[0],"search")==0) {
+- while (__searchdomains > 0) {
+- free(__searchdomain[--__searchdomains]);
+- __searchdomain[__searchdomains] = NULL;
++ if (strcmp(argv[0], "nameserver") == 0) {
++ for (i = 1; i < argc && __nameservers < MAX_SERVERS; i++) {
++ __nameserver[__nameservers++] = strdup(argv[i]);
++ DPRINTF("adding nameserver %s\n", argv[i]);
++ }
+ }
+- for (i=1; i < argc && __searchdomains < MAX_SEARCH; i++) {
+- __searchdomain[__searchdomains++] = strdup(argv[i]);
+- DPRINTF("adding search %s\n", argv[i]);
++
++ /* domain and search are mutually exclusive, the last one wins */
++ if (strcmp(argv[0],"domain")==0 || strcmp(argv[0],"search")==0) {
++ while (__searchdomains > 0) {
++ free(__searchdomain[--__searchdomains]);
++ __searchdomain[__searchdomains] = NULL;
++ }
++ for (i=1; i < argc && __searchdomains < MAX_SEARCH; i++) {
++ __searchdomain[__searchdomains++] = strdup(argv[i]);
++ DPRINTF("adding search %s\n", argv[i]);
++ }
+ }
+ }
++ fclose(fp);
++ DPRINTF("nameservers = %d\n", __nameservers);
++ goto DONE;
+ }
+- fclose(fp);
+- DPRINTF("nameservers = %d\n", __nameservers);
+- BIGUNLOCK;
+- return 0;
+- }
+ DPRINTF("failed to open %s\n", "resolv.conf");
+ h_errno = NO_RECOVERY;
+- BIGUNLOCK;
+- return -1;
++
++ rv = -1;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
++ return rv;
+ }
+ #endif
+
+@@ -1044,7 +1032,7 @@ int __open_nameservers()
+
+ void __close_nameservers(void)
+ {
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ while (__nameservers > 0) {
+ free(__nameserver[--__nameservers]);
+ __nameserver[__nameservers] = NULL;
+@@ -1053,7 +1041,7 @@ void __close_nameservers(void)
+ free(__searchdomain[--__searchdomains]);
+ __searchdomain[__searchdomains] = NULL;
+ }
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+ }
+ #endif
+
+@@ -1063,8 +1051,8 @@ struct hostent *gethostbyname(const char
+ {
+ static struct hostent h;
+ static char buf[sizeof(struct in_addr) +
+- sizeof(struct in_addr *)*2 +
+- sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
++ sizeof(struct in_addr *)*2 +
++ sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
+ struct hostent *hp;
+
+ gethostbyname_r(name, &h, buf, sizeof(buf), &hp, &h_errno);
+@@ -1082,8 +1070,8 @@ struct hostent *gethostbyname2(const cha
+ #else /* __UCLIBC_HAS_IPV6__ */
+ static struct hostent h;
+ static char buf[sizeof(struct in6_addr) +
+- sizeof(struct in6_addr *)*2 +
+- sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
++ sizeof(struct in6_addr *)*2 +
++ sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
+ struct hostent *hp;
+
+ gethostbyname2_r(name, family, &h, buf, sizeof(buf), &hp, &h_errno);
+@@ -1119,7 +1107,7 @@ int res_init(void)
+ /** rp->rhook = NULL; **/
+ /** rp->_u._ext.nsinit = 0; **/
+
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ if(__searchdomains) {
+ int i;
+ for(i=0; i<__searchdomains; i++) {
+@@ -1139,7 +1127,7 @@ int res_init(void)
+ }
+ }
+ rp->nscount = __nameservers;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+
+ return(0);
+ }
+@@ -1175,10 +1163,10 @@ int res_query(const char *dname, int cla
+
+ memset((char *) &a, '\0', sizeof(a));
+
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ __nameserversXX=__nameservers;
+ __nameserverXX=__nameserver;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+ i = __dns_lookup(dname, type, __nameserversXX, __nameserverXX, &packet, &a);
+
+ if (i < 0) {
+@@ -1207,10 +1195,10 @@ int res_query(const char *dname, int cla
+ * is detected. Error code, if any, is left in h_errno.
+ */
+ int res_search(name, class, type, answer, anslen)
+- const char *name; /* domain name */
+- int class, type; /* class and type of query */
+- u_char *answer; /* buffer to put answer */
+- int anslen; /* size of answer */
++ const char *name; /* domain name */
++ int class, type; /* class and type of query */
++ u_char *answer; /* buffer to put answer */
++ int anslen; /* size of answer */
+ {
+ const char *cp, * const *domain;
+ HEADER *hp = (HEADER *)(void *)answer;
+@@ -1256,11 +1244,11 @@ int res_search(name, class, type, answer
+ int done = 0;
+
+ for (domain = (const char * const *)_res.dnsrch;
+- *domain && !done;
+- domain++) {
++ *domain && !done;
++ domain++) {
+
+ ret = res_querydomain(name, *domain, class, type,
+- answer, anslen);
++ answer, anslen);
+ if (ret > 0)
+ return (ret);
+
+@@ -1283,22 +1271,22 @@ int res_search(name, class, type, answer
+ }
+
+ switch (h_errno) {
+- case NO_DATA:
+- got_nodata++;
+- /* FALLTHROUGH */
+- case HOST_NOT_FOUND:
+- /* keep trying */
+- break;
+- case TRY_AGAIN:
+- if (hp->rcode == SERVFAIL) {
+- /* try next search element, if any */
+- got_servfail++;
++ case NO_DATA:
++ got_nodata++;
++ /* FALLTHROUGH */
++ case HOST_NOT_FOUND:
++ /* keep trying */
+ break;
+- }
+- /* FALLTHROUGH */
+- default:
+- /* anything else implies that we're done */
+- done++;
++ case TRY_AGAIN:
++ if (hp->rcode == SERVFAIL) {
++ /* try next search element, if any */
++ got_servfail++;
++ break;
++ }
++ /* FALLTHROUGH */
++ default:
++ /* anything else implies that we're done */
++ done++;
+ }
+ /*
+ * if we got here for some reason other than DNSRCH,
+@@ -1342,10 +1330,10 @@ int res_search(name, class, type, answer
+ * removing a trailing dot from name if domain is NULL.
+ */
+ int res_querydomain(name, domain, class, type, answer, anslen)
+- const char *name, *domain;
+- int class, type; /* class and type of query */
+- u_char *answer; /* buffer to put answer */
+- int anslen; /* size of answer */
++ const char *name, *domain;
++ int class, type; /* class and type of query */
++ u_char *answer; /* buffer to put answer */
++ int anslen; /* size of answer */
+ {
+ char nbuf[MAXDNAME];
+ const char *longname = nbuf;
+@@ -1359,7 +1347,7 @@ int res_querydomain(name, domain, class,
+ #ifdef DEBUG
+ if (_res.options & RES_DEBUG)
+ printf(";; res_querydomain(%s, %s, %d, %d)\n",
+- name, domain?domain:"<Nil>", class, type);
++ name, domain?domain:"<Nil>", class, type);
+ #endif
+ if (domain == NULL) {
+ /*
+@@ -1400,11 +1388,11 @@ struct hostent *gethostbyaddr (const voi
+ static struct hostent h;
+ static char buf[
+ #ifndef __UCLIBC_HAS_IPV6__
+- sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
++ sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
+ #else
+- sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
++ sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
++ sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
+ struct hostent *hp;
+
+ gethostbyaddr_r(addr, len, type, &h, buf, sizeof(buf), &hp, &h_errno);
+@@ -1425,11 +1413,11 @@ void __open_etc_hosts(FILE **fp)
+ }
+
+ int __read_etc_hosts_r(FILE * fp, const char * name, int type,
+- enum etc_hosts_action action,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ enum etc_hosts_action action,
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+ {
+ struct in_addr *in=NULL;
+ struct in_addr **addr_list=NULL;
+@@ -1576,56 +1564,49 @@ int __read_etc_hosts_r(FILE * fp, const
+
+ #ifdef L_gethostent
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ static int __stay_open;
+ static FILE * __gethostent_fp;
+
+ void endhostent (void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ __stay_open = 0;
+ if (__gethostent_fp) {
+- fclose(__gethostent_fp);
++ fclose(__gethostent_fp);
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void sethostent (int stay_open)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ __stay_open = stay_open;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ int gethostent_r(struct hostent *result_buf, char *buf, size_t buflen,
+- struct hostent **result, int *h_errnop)
++ struct hostent **result, int *h_errnop)
+ {
+- int ret;
++ int ret = 0;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (__gethostent_fp == NULL) {
+- __open_etc_hosts(&__gethostent_fp);
+- if (__gethostent_fp == NULL) {
+- UNLOCK;
+- *result=NULL;
+- return 0;
+- }
++ __open_etc_hosts(&__gethostent_fp);
++ if (__gethostent_fp == NULL) {
++ *result=NULL;
++ goto DONE;
++ }
+ }
+
+ ret = __read_etc_hosts_r(__gethostent_fp, NULL, AF_INET, GETHOSTENT,
+- result_buf, buf, buflen, result, h_errnop);
++ result_buf, buf, buflen, result, h_errnop);
+ if (__stay_open==0) {
+- fclose(__gethostent_fp);
++ fclose(__gethostent_fp);
+ }
+- UNLOCK;
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return(ret);
+ }
+
+@@ -1634,17 +1615,17 @@ struct hostent *gethostent (void)
+ static struct hostent h;
+ static char buf[
+ #ifndef __UCLIBC_HAS_IPV6__
+- sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
++ sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
+ #else
+- sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
++ sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- sizeof(char *)*(ALIAS_DIM) +
+- 80/*namebuffer*/ + 2/* margin */];
++ sizeof(char *)*(ALIAS_DIM) +
++ 80/*namebuffer*/ + 2/* margin */];
+ struct hostent *host;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ gethostent_r(&h, buf, sizeof(buf), &host, &h_errno);
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return(host);
+ }
+ #endif
+@@ -1652,23 +1633,23 @@ struct hostent *gethostent (void)
+ #ifdef L_get_hosts_byname_r
+
+ int __get_hosts_byname_r(const char * name, int type,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+ {
+ return(__read_etc_hosts_r(NULL, name, type, GET_HOSTS_BYNAME,
+- result_buf, buf, buflen, result, h_errnop));
++ result_buf, buf, buflen, result, h_errnop));
+ }
+ #endif
+
+ #ifdef L_get_hosts_byaddr_r
+
+ int __get_hosts_byaddr_r(const char * addr, int len, int type,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+ {
+ #ifndef __UCLIBC_HAS_IPV6__
+ char ipaddr[INET_ADDRSTRLEN];
+@@ -1677,24 +1658,24 @@ int __get_hosts_byaddr_r(const char * ad
+ #endif /* __UCLIBC_HAS_IPV6__ */
+
+ switch (type) {
+- case AF_INET:
+- if (len != sizeof(struct in_addr))
+- return 0;
+- break;
++ case AF_INET:
++ if (len != sizeof(struct in_addr))
++ return 0;
++ break;
+ #ifdef __UCLIBC_HAS_IPV6__
+- case AF_INET6:
+- if (len != sizeof(struct in6_addr))
+- return 0;
+- break;
++ case AF_INET6:
++ if (len != sizeof(struct in6_addr))
++ return 0;
++ break;
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- default:
+- return 0;
++ default:
++ return 0;
+ }
+
+ inet_ntop(type, addr, ipaddr, sizeof(ipaddr));
+
+ return(__read_etc_hosts_r(NULL, ipaddr, type, GET_HOSTS_BYADDR,
+- result_buf, buf, buflen, result, h_errnop));
++ result_buf, buf, buflen, result, h_errnop));
+ }
+ #endif
+
+@@ -1705,8 +1686,8 @@ int __get_hosts_byaddr_r(const char * ad
+ #endif /* min */
+
+ int getnameinfo (const struct sockaddr *sa, socklen_t addrlen, char *host,
+- socklen_t hostlen, char *serv, socklen_t servlen,
+- unsigned int flags)
++ socklen_t hostlen, char *serv, socklen_t servlen,
++ unsigned int flags)
+ {
+ int serrno = errno;
+ int ok = 0;
+@@ -1720,167 +1701,167 @@ int getnameinfo (const struct sockaddr *
+ return EAI_FAMILY;
+
+ switch (sa->sa_family) {
+- case AF_LOCAL:
+- break;
+- case AF_INET:
+- if (addrlen < sizeof (struct sockaddr_in))
+- return EAI_FAMILY;
+- break;
++ case AF_LOCAL:
++ break;
++ case AF_INET:
++ if (addrlen < sizeof (struct sockaddr_in))
++ return EAI_FAMILY;
++ break;
+ #ifdef __UCLIBC_HAS_IPV6__
+- case AF_INET6:
+- if (addrlen < sizeof (struct sockaddr_in6))
+- return EAI_FAMILY;
+- break;
++ case AF_INET6:
++ if (addrlen < sizeof (struct sockaddr_in6))
++ return EAI_FAMILY;
++ break;
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- default:
+- return EAI_FAMILY;
++ default:
++ return EAI_FAMILY;
+ }
+
+ if (host != NULL && hostlen > 0)
+ switch (sa->sa_family) {
+- case AF_INET:
++ case AF_INET:
+ #ifdef __UCLIBC_HAS_IPV6__
+- case AF_INET6:
++ case AF_INET6:
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- if (!(flags & NI_NUMERICHOST)) {
++ if (!(flags & NI_NUMERICHOST)) {
+ #ifdef __UCLIBC_HAS_IPV6__
+- if (sa->sa_family == AF_INET6)
+- h = gethostbyaddr ((const void *)
+- &(((const struct sockaddr_in6 *) sa)->sin6_addr),
+- sizeof(struct in6_addr), AF_INET6);
+- else
+-#endif /* __UCLIBC_HAS_IPV6__ */
+- h = gethostbyaddr ((const void *) &(((const struct sockaddr_in *)sa)->sin_addr),
+- sizeof(struct in_addr), AF_INET);
+-
+- if (h) {
+- char *c;
+- if ((flags & NI_NOFQDN)
+- && (getdomainname (domain, sizeof(domain)) == 0)
+- && (c = strstr (h->h_name, domain))
+- && (c != h->h_name) && (*(--c) == '.')) {
+- strncpy (host, h->h_name,
+- min(hostlen, (size_t) (c - h->h_name)));
+- host[min(hostlen - 1, (size_t) (c - h->h_name))] = '\0';
+- ok = 1;
+- } else {
+- strncpy (host, h->h_name, hostlen);
+- ok = 1;
++ if (sa->sa_family == AF_INET6)
++ h = gethostbyaddr ((const void *)
++ &(((const struct sockaddr_in6 *) sa)->sin6_addr),
++ sizeof(struct in6_addr), AF_INET6);
++ else
++#endif /* __UCLIBC_HAS_IPV6__ */
++ h = gethostbyaddr ((const void *) &(((const struct sockaddr_in *)sa)->sin_addr),
++ sizeof(struct in_addr), AF_INET);
++
++ if (h) {
++ char *c;
++ if ((flags & NI_NOFQDN)
++ && (getdomainname (domain, sizeof(domain)) == 0)
++ && (c = strstr (h->h_name, domain))
++ && (c != h->h_name) && (*(--c) == '.')) {
++ strncpy (host, h->h_name,
++ min(hostlen, (size_t) (c - h->h_name)));
++ host[min(hostlen - 1, (size_t) (c - h->h_name))] = '\0';
++ ok = 1;
++ } else {
++ strncpy (host, h->h_name, hostlen);
++ ok = 1;
++ }
+ }
+- }
+- }
++ }
+
+- if (!ok) {
+- if (flags & NI_NAMEREQD) {
+- errno = serrno;
+- return EAI_NONAME;
+- } else {
+- const char *c;
++ if (!ok) {
++ if (flags & NI_NAMEREQD) {
++ errno = serrno;
++ return EAI_NONAME;
++ } else {
++ const char *c;
+ #ifdef __UCLIBC_HAS_IPV6__
+- if (sa->sa_family == AF_INET6) {
+- const struct sockaddr_in6 *sin6p;
++ if (sa->sa_family == AF_INET6) {
++ const struct sockaddr_in6 *sin6p;
+
+- sin6p = (const struct sockaddr_in6 *) sa;
++ sin6p = (const struct sockaddr_in6 *) sa;
+
+- c = inet_ntop (AF_INET6,
+- (const void *) &sin6p->sin6_addr, host, hostlen);
++ c = inet_ntop (AF_INET6,
++ (const void *) &sin6p->sin6_addr, host, hostlen);
+ #if 0
+- /* Does scope id need to be supported? */
+- uint32_t scopeid;
+- scopeid = sin6p->sin6_scope_id;
+- if (scopeid != 0) {
+- /* Buffer is >= IFNAMSIZ+1. */
+- char scopebuf[IFNAMSIZ + 1];
+- char *scopeptr;
+- int ni_numericscope = 0;
+- size_t real_hostlen = __strnlen (host, hostlen);
+- size_t scopelen = 0;
+-
+- scopebuf[0] = SCOPE_DELIMITER;
+- scopebuf[1] = '\0';
+- scopeptr = &scopebuf[1];
+-
+- if (IN6_IS_ADDR_LINKLOCAL (&sin6p->sin6_addr)
+- || IN6_IS_ADDR_MC_LINKLOCAL (&sin6p->sin6_addr)) {
+- if (if_indextoname (scopeid, scopeptr) == NULL)
++ /* Does scope id need to be supported? */
++ uint32_t scopeid;
++ scopeid = sin6p->sin6_scope_id;
++ if (scopeid != 0) {
++ /* Buffer is >= IFNAMSIZ+1. */
++ char scopebuf[IFNAMSIZ + 1];
++ char *scopeptr;
++ int ni_numericscope = 0;
++ size_t real_hostlen = __strnlen (host, hostlen);
++ size_t scopelen = 0;
++
++ scopebuf[0] = SCOPE_DELIMITER;
++ scopebuf[1] = '\0';
++ scopeptr = &scopebuf[1];
++
++ if (IN6_IS_ADDR_LINKLOCAL (&sin6p->sin6_addr)
++ || IN6_IS_ADDR_MC_LINKLOCAL (&sin6p->sin6_addr)) {
++ if (if_indextoname (scopeid, scopeptr) == NULL)
++ ++ni_numericscope;
++ else
++ scopelen = strlen (scopebuf);
++ } else {
+ ++ni_numericscope;
+- else
+- scopelen = strlen (scopebuf);
+- } else {
+- ++ni_numericscope;
+- }
++ }
+
+- if (ni_numericscope)
+- scopelen = 1 + snprintf (scopeptr,
+- (scopebuf
+- + sizeof scopebuf
+- - scopeptr),
+- "%u", scopeid);
+-
+- if (real_hostlen + scopelen + 1 > hostlen)
+- return EAI_SYSTEM;
+- memcpy (host + real_hostlen, scopebuf, scopelen + 1);
+- }
++ if (ni_numericscope)
++ scopelen = 1 + snprintf (scopeptr,
++ (scopebuf
++ + sizeof scopebuf
++ - scopeptr),
++ "%u", scopeid);
++
++ if (real_hostlen + scopelen + 1 > hostlen)
++ return EAI_SYSTEM;
++ memcpy (host + real_hostlen, scopebuf, scopelen + 1);
++ }
+ #endif
+- } else
++ } else
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- c = inet_ntop (AF_INET, (const void *)
+- &(((const struct sockaddr_in *) sa)->sin_addr),
+- host, hostlen);
+-
+- if (c == NULL) {
+- errno = serrno;
+- return EAI_SYSTEM;
++ c = inet_ntop (AF_INET, (const void *)
++ &(((const struct sockaddr_in *) sa)->sin_addr),
++ host, hostlen);
++
++ if (c == NULL) {
++ errno = serrno;
++ return EAI_SYSTEM;
++ }
+ }
++ ok = 1;
+ }
+- ok = 1;
+- }
+- break;
+-
+- case AF_LOCAL:
+- if (!(flags & NI_NUMERICHOST)) {
+- struct utsname utsname;
++ break;
+
+- if (!uname (&utsname)) {
+- strncpy (host, utsname.nodename, hostlen);
+- break;
++ case AF_LOCAL:
++ if (!(flags & NI_NUMERICHOST)) {
++ struct utsname utsname;
++
++ if (!uname (&utsname)) {
++ strncpy (host, utsname.nodename, hostlen);
++ break;
++ };
+ };
+- };
+
+- if (flags & NI_NAMEREQD) {
+- errno = serrno;
+- return EAI_NONAME;
+- }
++ if (flags & NI_NAMEREQD) {
++ errno = serrno;
++ return EAI_NONAME;
++ }
+
+- strncpy (host, "localhost", hostlen);
+- break;
++ strncpy (host, "localhost", hostlen);
++ break;
+
+- default:
+- return EAI_FAMILY;
+- }
++ default:
++ return EAI_FAMILY;
++ }
+
+ if (serv && (servlen > 0)) {
+ switch (sa->sa_family) {
+- case AF_INET:
++ case AF_INET:
+ #ifdef __UCLIBC_HAS_IPV6__
+- case AF_INET6:
++ case AF_INET6:
+ #endif /* __UCLIBC_HAS_IPV6__ */
+- if (!(flags & NI_NUMERICSERV)) {
+- struct servent *s;
+- s = getservbyport (((const struct sockaddr_in *) sa)->sin_port,
+- ((flags & NI_DGRAM) ? "udp" : "tcp"));
+- if (s) {
+- strncpy (serv, s->s_name, servlen);
+- break;
++ if (!(flags & NI_NUMERICSERV)) {
++ struct servent *s;
++ s = getservbyport (((const struct sockaddr_in *) sa)->sin_port,
++ ((flags & NI_DGRAM) ? "udp" : "tcp"));
++ if (s) {
++ strncpy (serv, s->s_name, servlen);
++ break;
++ }
+ }
+- }
+- snprintf (serv, servlen, "%d",
+- ntohs (((const struct sockaddr_in *) sa)->sin_port));
+- break;
++ snprintf (serv, servlen, "%d",
++ ntohs (((const struct sockaddr_in *) sa)->sin_port));
++ break;
+
+- case AF_LOCAL:
+- strncpy (serv, ((const struct sockaddr_un *) sa)->sun_path, servlen);
+- break;
++ case AF_LOCAL:
++ strncpy (serv, ((const struct sockaddr_un *) sa)->sun_path, servlen);
++ break;
+ }
+ }
+ if (host && (hostlen > 0))
+@@ -1896,10 +1877,10 @@ int getnameinfo (const struct sockaddr *
+ #ifdef L_gethostbyname_r
+
+ int gethostbyname_r(const char * name,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+ {
+ struct in_addr *in;
+ struct in_addr **addr_list;
+@@ -1921,7 +1902,7 @@ int gethostbyname_r(const char * name,
+ __set_errno(0); /* to check for missing /etc/hosts. */
+
+ if ((i=__get_hosts_byname_r(name, AF_INET, result_buf,
+- buf, buflen, result, h_errnop))==0)
++ buf, buflen, result, h_errnop))==0)
+ return i;
+ switch (*h_errnop) {
+ case HOST_NOT_FOUND:
+@@ -1983,60 +1964,60 @@ int gethostbyname_r(const char * name,
+
+ for (;;) {
+
+- BIGLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
+ __nameserversXX=__nameservers;
+ __nameserverXX=__nameserver;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+ a.buf = buf;
+ a.buflen = buflen;
+ a.add_count = 0;
+ i = __dns_lookup(name, T_A, __nameserversXX, __nameserverXX, &packet, &a);
+
+ if (i < 0) {
+- *h_errnop = HOST_NOT_FOUND;
+- DPRINTF("__dns_lookup\n");
+- return TRY_AGAIN;
++ *h_errnop = HOST_NOT_FOUND;
++ DPRINTF("__dns_lookup\n");
++ return TRY_AGAIN;
+ }
+
+ if ((a.rdlength + sizeof(struct in_addr*)) * a.add_count + 256 > buflen)
+- {
+- free(a.dotted);
+- free(packet);
+- *h_errnop = NETDB_INTERNAL;
+- DPRINTF("buffer too small for all addresses\n");
+- return ERANGE;
+- }
++ {
++ free(a.dotted);
++ free(packet);
++ *h_errnop = NETDB_INTERNAL;
++ DPRINTF("buffer too small for all addresses\n");
++ return ERANGE;
++ }
+ else if(a.add_count > 0)
+- {
+- memmove(buf - sizeof(struct in_addr*)*2, buf, a.add_count * a.rdlength);
+- addr_list = (struct in_addr**)(buf + a.add_count * a.rdlength);
+- addr_list[0] = in;
+- for (i = a.add_count-1; i>=0; --i)
+- addr_list[i+1] = (struct in_addr*)(buf - sizeof(struct in_addr*)*2 + a.rdlength * i);
+- addr_list[a.add_count + 1] = 0;
+- buflen -= (((char*)&(addr_list[a.add_count + 2])) - buf);
+- buf = (char*)&addr_list[a.add_count + 2];
+- }
++ {
++ memmove(buf - sizeof(struct in_addr*)*2, buf, a.add_count * a.rdlength);
++ addr_list = (struct in_addr**)(buf + a.add_count * a.rdlength);
++ addr_list[0] = in;
++ for (i = a.add_count-1; i>=0; --i)
++ addr_list[i+1] = (struct in_addr*)(buf - sizeof(struct in_addr*)*2 + a.rdlength * i);
++ addr_list[a.add_count + 1] = 0;
++ buflen -= (((char*)&(addr_list[a.add_count + 2])) - buf);
++ buf = (char*)&addr_list[a.add_count + 2];
++ }
+
+ strncpy(buf, a.dotted, buflen);
+ free(a.dotted);
+
+ if (a.atype == T_A) { /* ADDRESS */
+- memcpy(in, a.rdata, sizeof(*in));
+- result_buf->h_name = buf;
+- result_buf->h_addrtype = AF_INET;
+- result_buf->h_length = sizeof(*in);
+- result_buf->h_addr_list = (char **) addr_list;
++ memcpy(in, a.rdata, sizeof(*in));
++ result_buf->h_name = buf;
++ result_buf->h_addrtype = AF_INET;
++ result_buf->h_length = sizeof(*in);
++ result_buf->h_addr_list = (char **) addr_list;
+ #ifdef __UCLIBC_MJN3_ONLY__
+ #warning TODO -- generate the full list
+ #endif
+- result_buf->h_aliases = alias; /* TODO: generate the full list */
+- free(packet);
+- break;
++ result_buf->h_aliases = alias; /* TODO: generate the full list */
++ free(packet);
++ break;
+ } else {
+- free(packet);
+- *h_errnop=HOST_NOT_FOUND;
+- return TRY_AGAIN;
++ free(packet);
++ *h_errnop=HOST_NOT_FOUND;
++ return TRY_AGAIN;
+ }
+ }
+
+@@ -2049,14 +2030,14 @@ int gethostbyname_r(const char * name,
+ #ifdef L_gethostbyname2_r
+
+ int gethostbyname2_r(const char *name, int family,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+ {
+ #ifndef __UCLIBC_HAS_IPV6__
+ return family == (AF_INET)? gethostbyname_r(name, result_buf,
+- buf, buflen, result, h_errnop) : HOST_NOT_FOUND;
++ buf, buflen, result, h_errnop) : HOST_NOT_FOUND;
+ #else /* __UCLIBC_HAS_IPV6__ */
+ struct in6_addr *in;
+ struct in6_addr **addr_list;
+@@ -2084,7 +2065,7 @@ int gethostbyname2_r(const char *name, i
+ __set_errno(0); /* to check for missing /etc/hosts. */
+
+ if ((i=__get_hosts_byname_r(name, AF_INET, result_buf,
+- buf, buflen, result, h_errnop))==0)
++ buf, buflen, result, h_errnop))==0)
+ return i;
+ switch (*h_errnop) {
+ case HOST_NOT_FOUND:
+@@ -2137,10 +2118,10 @@ int gethostbyname2_r(const char *name, i
+ memset((char *) &a, '\0', sizeof(a));
+
+ for (;;) {
+- BIGLOCK;
+- __nameserversXX=__nameservers;
+- __nameserverXX=__nameserver;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
++ __nameserversXX=__nameservers;
++ __nameserverXX=__nameserver;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+
+ i = __dns_lookup(buf, T_AAAA, __nameserversXX, __nameserverXX, &packet, &a);
+
+@@ -2190,10 +2171,10 @@ int gethostbyname2_r(const char *name, i
+
+ #ifdef L_gethostbyaddr_r
+ int gethostbyaddr_r (const void *addr, socklen_t len, int type,
+- struct hostent * result_buf,
+- char * buf, size_t buflen,
+- struct hostent ** result,
+- int * h_errnop)
++ struct hostent * result_buf,
++ char * buf, size_t buflen,
++ struct hostent ** result,
++ int * h_errnop)
+
+ {
+ struct in_addr *in;
+@@ -2234,7 +2215,7 @@ int gethostbyaddr_r (const void *addr, s
+
+ /* do /etc/hosts first */
+ if ((i=__get_hosts_byaddr_r(addr, len, type, result_buf,
+- buf, buflen, result, h_errnop))==0)
++ buf, buflen, result, h_errnop))==0)
+ return i;
+ switch (*h_errnop) {
+ case HOST_NOT_FOUND:
+@@ -2294,7 +2275,7 @@ int gethostbyaddr_r (const void *addr, s
+ addr_list[0] = in;
+
+ sprintf(buf, "%u.%u.%u.%u.in-addr.arpa",
+- tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]);
++ tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]);
+ #ifdef __UCLIBC_HAS_IPV6__
+ } else {
+ memcpy(in6->s6_addr, addr, len);
+@@ -2304,7 +2285,7 @@ int gethostbyaddr_r (const void *addr, s
+
+ for (i = len - 1; i >= 0; i--) {
+ qp += sprintf(qp, "%x.%x.", in6->s6_addr[i] & 0xf,
+- (in6->s6_addr[i] >> 4) & 0xf);
++ (in6->s6_addr[i] >> 4) & 0xf);
+ }
+ strcpy(qp, "ip6.int");
+ #endif /* __UCLIBC_HAS_IPV6__ */
+@@ -2314,10 +2295,10 @@ int gethostbyaddr_r (const void *addr, s
+
+ for (;;) {
+
+- BIGLOCK;
+- __nameserversXX=__nameservers;
+- __nameserverXX=__nameserver;
+- BIGUNLOCK;
++ __UCLIBC_MUTEX_LOCK(__resolv_lock);
++ __nameserversXX=__nameservers;
++ __nameserverXX=__nameserver;
++ __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
+ i = __dns_lookup(buf, T_PTR, __nameserversXX, __nameserverXX, &packet, &a);
+
+ if (i < 0) {
+@@ -2381,7 +2362,7 @@ int gethostbyaddr_r (const void *addr, s
+ * Return size of compressed name or -1 if there was an error.
+ */
+ int __dn_expand(const u_char *msg, const u_char *eom, const u_char *src,
+- char *dst, int dstsiz)
++ char *dst, int dstsiz)
+ {
+ int n = ns_name_uncompress(msg, eom, src, dst, (size_t)dstsiz);
+
+@@ -2401,7 +2382,7 @@ int __dn_expand(const u_char *msg, const
+ */
+ static int printable(int ch)
+ {
+- return (ch > 0x20 && ch < 0x7f);
++ return (ch > 0x20 && ch < 0x7f);
+ }
+
+ /*
+@@ -2413,18 +2394,18 @@ static int printable(int ch)
+ */
+ static int special(int ch)
+ {
+- switch (ch) {
++ switch (ch) {
+ case 0x22: /* '"' */
+ case 0x2E: /* '.' */
+ case 0x3B: /* ';' */
+ case 0x5C: /* '\\' */
+- /* Special modifiers in zone files. */
++ /* Special modifiers in zone files. */
+ case 0x40: /* '@' */
+ case 0x24: /* '$' */
+- return (1);
++ return (1);
+ default:
+- return (0);
+- }
++ return (0);
++ }
+ }
+
+ /*
+@@ -2436,7 +2417,7 @@ static int special(int ch)
+ * Root domain returns as "." not "".
+ */
+ int __ns_name_uncompress(const u_char *msg, const u_char *eom,
+- const u_char *src, char *dst, size_t dstsiz)
++ const u_char *src, char *dst, size_t dstsiz)
+ {
+ u_char tmp[NS_MAXCDNAME];
+ int n;
+@@ -2525,7 +2506,7 @@ int __ns_name_ntop(const u_char *src, ch
+ return (-1);
+ }
+ *dn++ = '\0';
+- return (dn - dst);
++ return (dn - dst);
+ }
+
+ /*
+@@ -2535,7 +2516,7 @@ int __ns_name_ntop(const u_char *src, ch
+ * -1 if it fails, or consumed octets if it succeeds.
+ */
+ int __ns_name_unpack(const u_char *msg, const u_char *eom, const u_char *src,
+- u_char *dst, size_t dstsiz)
++ u_char *dst, size_t dstsiz)
+ {
+ const u_char *srcp, *dstlim;
+ u_char *dstp;
+@@ -2554,46 +2535,46 @@ int __ns_name_unpack(const u_char *msg,
+ while ((n = *srcp++) != 0) {
+ /* Check for indirection. */
+ switch (n & NS_CMPRSFLGS) {
+- case 0:
+- /* Limit checks. */
+- if (dstp + n + 1 >= dstlim || srcp + n >= eom) {
+- __set_errno (EMSGSIZE);
+- return (-1);
+- }
+- checked += n + 1;
+- *dstp++ = n;
+- memcpy(dstp, srcp, n);
+- dstp += n;
+- srcp += n;
+- break;
++ case 0:
++ /* Limit checks. */
++ if (dstp + n + 1 >= dstlim || srcp + n >= eom) {
++ __set_errno (EMSGSIZE);
++ return (-1);
++ }
++ checked += n + 1;
++ *dstp++ = n;
++ memcpy(dstp, srcp, n);
++ dstp += n;
++ srcp += n;
++ break;
+
+- case NS_CMPRSFLGS:
+- if (srcp >= eom) {
+- __set_errno (EMSGSIZE);
+- return (-1);
+- }
+- if (len < 0)
+- len = srcp - src + 1;
+- srcp = msg + (((n & 0x3f) << 8) | (*srcp & 0xff));
+- if (srcp < msg || srcp >= eom) { /* Out of range. */
+- __set_errno (EMSGSIZE);
+- return (-1);
+- }
+- checked += 2;
+- /*
+- * Check for loops in the compressed name;
+- * if we've looked at the whole message,
+- * there must be a loop.
+- */
+- if (checked >= eom - msg) {
+- __set_errno (EMSGSIZE);
+- return (-1);
+- }
+- break;
++ case NS_CMPRSFLGS:
++ if (srcp >= eom) {
++ __set_errno (EMSGSIZE);
++ return (-1);
++ }
++ if (len < 0)
++ len = srcp - src + 1;
++ srcp = msg + (((n & 0x3f) << 8) | (*srcp & 0xff));
++ if (srcp < msg || srcp >= eom) { /* Out of range. */
++ __set_errno (EMSGSIZE);
++ return (-1);
++ }
++ checked += 2;
++ /*
++ * Check for loops in the compressed name;
++ * if we've looked at the whole message,
++ * there must be a loop.
++ */
++ if (checked >= eom - msg) {
++ __set_errno (EMSGSIZE);
++ return (-1);
++ }
++ break;
+
+- default:
+- __set_errno (EMSGSIZE);
+- return (-1); /* flag error */
++ default:
++ __set_errno (EMSGSIZE);
++ return (-1); /* flag error */
+ }
+ }
+ *dstp = '\0';
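
The resolver hunks above swap the old BIGLOCK/BIGUNLOCK pair for __UCLIBC_MUTEX_LOCK/__UCLIBC_MUTEX_UNLOCK on __resolv_lock, holding the lock only long enough to copy the nameserver settings before the slow __dns_lookup() call. A minimal plain-pthreads sketch of that snapshot-under-lock idiom follows; the names are illustrative stand-ins, not the real resolver globals.

/* Snapshot-under-lock sketch (illustrative names, plain pthreads):
 * copy shared resolver settings while holding the lock, then perform the
 * slow lookup with the lock released. */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t resolv_lock = PTHREAD_MUTEX_INITIALIZER;
static int nameserver_count;          /* stands in for __nameservers */
static char **nameserver_list;        /* stands in for __nameserver  */

int lookup_sketch(const char *name)
{
    int local_count;
    char **local_list;

    pthread_mutex_lock(&resolv_lock);
    local_count = nameserver_count;   /* take a private snapshot */
    local_list  = nameserver_list;
    pthread_mutex_unlock(&resolv_lock);

    /* ... __dns_lookup()-style work would run here on the snapshot,
     * without blocking other threads that touch the resolver config ... */
    (void)name; (void)local_count; (void)local_list;
    return 0;
}
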
+diff --git a/libc/inet/rpc/create_xid.c b/libc/inet/rpc/create_xid.c
+index cbb961e..c86cbb4 100644
+--- a/libc/inet/rpc/create_xid.c
++++ b/libc/inet/rpc/create_xid.c
+@@ -27,15 +27,7 @@
+
+ /* The RPC code is not threadsafe, but new code should be threadsafe. */
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t createxid_lock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&createxid_lock)
+-# define UNLOCK __pthread_mutex_unlock(&createxid_lock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ static int is_initialized;
+ static struct drand48_data __rpc_lrand48_data;
+@@ -43,22 +35,22 @@ static struct drand48_data __rpc_lrand48
+ unsigned long
+ _create_xid (void)
+ {
+- unsigned long res;
++ unsigned long res;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+- if (!is_initialized)
+- {
+- struct timeval now;
++ if (!is_initialized)
++ {
++ struct timeval now;
+
+- gettimeofday (&now, (struct timezone *) 0);
+- srand48_r (now.tv_sec ^ now.tv_usec, &__rpc_lrand48_data);
+- is_initialized = 1;
+- }
++ gettimeofday (&now, (struct timezone *) 0);
++ srand48_r (now.tv_sec ^ now.tv_usec, &__rpc_lrand48_data);
++ is_initialized = 1;
++ }
+
+- lrand48_r (&__rpc_lrand48_data, &res);
++ lrand48_r (&__rpc_lrand48_data, &res);
+
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+- return res;
++ return res;
+ }
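
create_xid.c shows the basic conversion pattern: the file-local LOCK/UNLOCK macros give way to one __UCLIBC_MUTEX_STATIC declaration plus the shared lock/unlock macros. Assuming those macros reduce to an ordinary statically initialised mutex when threads are enabled (the real definitions live in bits/uClibc_mutex.h, which is outside this excerpt), the guarded lazy initialisation looks roughly like this in plain pthreads:

/* Plain-pthreads sketch of the _create_xid() locking above.  The
 * __UCLIBC_MUTEX_* macros are assumed to boil down to roughly this, plus
 * the deferred-cancellation wrapping discussed with weaks.c further down. */
#define _GNU_SOURCE            /* for struct drand48_data / *rand48_r */
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>

static pthread_mutex_t xid_lock = PTHREAD_MUTEX_INITIALIZER;
static int is_initialized;
static struct drand48_data rand_state;

unsigned long create_xid_sketch(void)
{
    long res;

    pthread_mutex_lock(&xid_lock);
    if (!is_initialized) {             /* one-time seeding, under the lock */
        struct timeval now;
        gettimeofday(&now, NULL);
        srand48_r(now.tv_sec ^ now.tv_usec, &rand_state);
        is_initialized = 1;
    }
    lrand48_r(&rand_state, &res);
    pthread_mutex_unlock(&xid_lock);

    return (unsigned long)res;
}
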
+diff --git a/libc/misc/dirent/closedir.c b/libc/misc/dirent/closedir.c
+index 068e2d3..56adb23 100644
+--- a/libc/misc/dirent/closedir.c
++++ b/libc/misc/dirent/closedir.c
+@@ -4,7 +4,6 @@
+ #include <unistd.h>
+ #include "dirstream.h"
+
+-
+ int closedir(DIR * dir)
+ {
+ int fd;
+@@ -19,14 +18,10 @@ int closedir(DIR * dir)
+ __set_errno(EBADF);
+ return -1;
+ }
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+ fd = dir->dd_fd;
+ dir->dd_fd = -1;
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
+ free(dir->dd_buf);
+ free(dir);
+ return close(fd);
+diff --git a/libc/misc/dirent/dirstream.h b/libc/misc/dirent/dirstream.h
+index 2dd0264..bd721c5 100644
+--- a/libc/misc/dirent/dirstream.h
++++ b/libc/misc/dirent/dirstream.h
+@@ -26,9 +26,8 @@ Cambridge, MA 02139, USA. */
+
+ #include <features.h>
+ #include <sys/types.h>
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-#endif
++
++#include <bits/uClibc_mutex.h>
+
+ /* For now, syscall readdir () only supports one entry at a time. It
+ * will be changed in the future.
+@@ -63,11 +62,7 @@ struct __dirstream {
+ size_t dd_max;
+
+ /* lock */
+-#ifdef __UCLIBC_HAS_THREADS__
+- pthread_mutex_t dd_lock;
+-#else
+- void *dd_lock;
+-#endif
++ __UCLIBC_MUTEX(dd_lock);
+ }; /* stream data from opendir() */
+
+
+diff --git a/libc/misc/dirent/readdir.c b/libc/misc/dirent/readdir.c
+index 1f196e1..c55317a 100644
+--- a/libc/misc/dirent/readdir.c
++++ b/libc/misc/dirent/readdir.c
+@@ -5,7 +5,6 @@
+ #include <dirent.h>
+ #include "dirstream.h"
+
+-
+ struct dirent *readdir(DIR * dir)
+ {
+ ssize_t bytes;
+@@ -16,9 +15,7 @@ struct dirent *readdir(DIR * dir)
+ return NULL;
+ }
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+
+ do {
+ if (dir->dd_size <= dir->dd_nextloc) {
+@@ -44,8 +41,6 @@ struct dirent *readdir(DIR * dir)
+ } while (de->d_ino == 0);
+
+ all_done:
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
+ return de;
+ }
+diff --git a/libc/misc/dirent/readdir64.c b/libc/misc/dirent/readdir64.c
+index f798c6f..6da3b0d 100644
+--- a/libc/misc/dirent/readdir64.c
++++ b/libc/misc/dirent/readdir64.c
+@@ -20,7 +20,6 @@
+ #include <dirent.h>
+ #include "dirstream.h"
+
+-
+ struct dirent64 *readdir64(DIR * dir)
+ {
+ ssize_t bytes;
+@@ -31,9 +30,7 @@ struct dirent64 *readdir64(DIR * dir)
+ return NULL;
+ }
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+
+ do {
+ if (dir->dd_size <= dir->dd_nextloc) {
+@@ -59,9 +56,7 @@ struct dirent64 *readdir64(DIR * dir)
+ } while (de->d_ino == 0);
+
+ all_done:
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
+
+ return de;
+ }
+diff --git a/libc/misc/dirent/readdir64_r.c b/libc/misc/dirent/readdir64_r.c
+index da3564e..cc96eff 100644
+--- a/libc/misc/dirent/readdir64_r.c
++++ b/libc/misc/dirent/readdir64_r.c
+@@ -19,7 +19,6 @@
+ #include <dirent.h>
+ #include "dirstream.h"
+
+-
+ int readdir64_r(DIR *dir, struct dirent64 *entry, struct dirent64 **result)
+ {
+ int ret;
+@@ -32,21 +31,19 @@ int readdir64_r(DIR *dir, struct dirent6
+ }
+ de = NULL;
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+
+ do {
+ if (dir->dd_size <= dir->dd_nextloc) {
+- /* read dir->dd_max bytes of directory entries. */
+- bytes = __getdents64(dir->dd_fd, dir->dd_buf, dir->dd_max);
+- if (bytes <= 0) {
+- *result = NULL;
+- ret = errno;
+- goto all_done;
+- }
+- dir->dd_size = bytes;
+- dir->dd_nextloc = 0;
++ /* read dir->dd_max bytes of directory entries. */
++ bytes = __getdents64(dir->dd_fd, dir->dd_buf, dir->dd_max);
++ if (bytes <= 0) {
++ *result = NULL;
++ ret = errno;
++ goto all_done;
++ }
++ dir->dd_size = bytes;
++ dir->dd_nextloc = 0;
+ }
+
+ de = (struct dirent64 *) (((char *) dir->dd_buf) + dir->dd_nextloc);
+@@ -66,12 +63,10 @@ int readdir64_r(DIR *dir, struct dirent6
+ }
+ ret = 0;
+
+-all_done:
++ all_done:
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
+- return((de != NULL)? 0 : ret);
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
++ return((de != NULL)? 0 : ret);
+ }
+ #endif /* __UCLIBC_HAS_LFS__ */
+
+diff --git a/libc/misc/dirent/readdir_r.c b/libc/misc/dirent/readdir_r.c
+index 245dcbd..aeccdd8 100644
+--- a/libc/misc/dirent/readdir_r.c
++++ b/libc/misc/dirent/readdir_r.c
+@@ -5,7 +5,6 @@
+ #include <dirent.h>
+ #include "dirstream.h"
+
+-
+ int readdir_r(DIR *dir, struct dirent *entry, struct dirent **result)
+ {
+ int ret;
+@@ -18,21 +17,19 @@ int readdir_r(DIR *dir, struct dirent *e
+ }
+ de = NULL;
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+
+ do {
+ if (dir->dd_size <= dir->dd_nextloc) {
+- /* read dir->dd_max bytes of directory entries. */
+- bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
+- if (bytes <= 0) {
+- *result = NULL;
+- ret = errno;
+- goto all_done;
+- }
+- dir->dd_size = bytes;
+- dir->dd_nextloc = 0;
++ /* read dir->dd_max bytes of directory entries. */
++ bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
++ if (bytes <= 0) {
++ *result = NULL;
++ ret = errno;
++ goto all_done;
++ }
++ dir->dd_size = bytes;
++ dir->dd_nextloc = 0;
+ }
+
+ de = (struct dirent *) (((char *) dir->dd_buf) + dir->dd_nextloc);
+@@ -52,10 +49,8 @@ int readdir_r(DIR *dir, struct dirent *e
+ }
+ ret = 0;
+
+-all_done:
++ all_done:
+
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
+- return((de != NULL)? 0 : ret);
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
++ return((de != NULL)? 0 : ret);
+ }
+diff --git a/libc/misc/dirent/rewinddir.c b/libc/misc/dirent/rewinddir.c
+index 60ef71d..fe8fc2a 100644
+--- a/libc/misc/dirent/rewinddir.c
++++ b/libc/misc/dirent/rewinddir.c
+@@ -3,7 +3,6 @@
+ #include <unistd.h>
+ #include "dirstream.h"
+
+-
+ /* rewinddir() just does an lseek(fd,0,0) - see close for comments */
+ void rewinddir(DIR * dir)
+ {
+@@ -11,12 +10,8 @@ void rewinddir(DIR * dir)
+ __set_errno(EBADF);
+ return;
+ }
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+ lseek(dir->dd_fd, 0, SEEK_SET);
+ dir->dd_nextoff = dir->dd_nextloc = dir->dd_size = 0;
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
+ }
+diff --git a/libc/misc/dirent/seekdir.c b/libc/misc/dirent/seekdir.c
+index 139f1e1..6d6f5f0 100644
+--- a/libc/misc/dirent/seekdir.c
++++ b/libc/misc/dirent/seekdir.c
+@@ -3,19 +3,14 @@
+ #include <unistd.h>
+ #include "dirstream.h"
+
+-
+ void seekdir(DIR * dir, long int offset)
+ {
+ if (!dir) {
+ __set_errno(EBADF);
+ return;
+ }
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_lock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_LOCK(dir->dd_lock);
+ dir->dd_nextoff = lseek(dir->dd_fd, offset, SEEK_SET);
+ dir->dd_size = dir->dd_nextloc = 0;
+-#ifdef __UCLIBC_HAS_THREADS__
+- __pthread_mutex_unlock(&(dir->dd_lock));
+-#endif
++ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
+ }
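
The dirent changes are mechanical but show the other half of the new API: __UCLIBC_MUTEX(dd_lock) embeds the lock as a struct member, and the lock/unlock macros now take the member itself (dir->dd_lock) rather than its address. Presumably the member compiles away, or shrinks to a dummy field, when threads are disabled, which is why the old #ifdef __UCLIBC_HAS_THREADS__ blocks around every lock call could be dropped. A hedged plain-pthreads sketch of the threaded case:

/* Per-object lock sketch for the DIR stream (threaded case only; the
 * single-threaded expansion of __UCLIBC_MUTEX() is assumed to let the
 * same call sites compile away). */
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>

struct dirstream_sketch {
    int dd_fd;
    off_t dd_nextoff;
    size_t dd_nextloc;
    size_t dd_size;
    pthread_mutex_t dd_lock;              /* ~ __UCLIBC_MUTEX(dd_lock) */
};

void rewinddir_sketch(struct dirstream_sketch *dir)
{
    pthread_mutex_lock(&dir->dd_lock);    /* ~ __UCLIBC_MUTEX_LOCK(dir->dd_lock) */
    lseek(dir->dd_fd, 0, SEEK_SET);
    dir->dd_nextoff = dir->dd_nextloc = dir->dd_size = 0;
    pthread_mutex_unlock(&dir->dd_lock);  /* ~ __UCLIBC_MUTEX_UNLOCK(dir->dd_lock) */
}
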
+diff --git a/libc/misc/mntent/mntent.c b/libc/misc/mntent/mntent.c
+index d98a687..af6d848 100644
+--- a/libc/misc/mntent/mntent.c
++++ b/libc/misc/mntent/mntent.c
+@@ -3,15 +3,9 @@
+ #include <string.h>
+ #include <mntent.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ /* Reentrant version of getmntent. */
+ struct mntent *getmntent_r (FILE *filep,
+@@ -67,7 +61,7 @@ struct mntent *getmntent(FILE * filep)
+ struct mntent *tmp;
+ static char *buff = NULL;
+ static struct mntent mnt;
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ if (!buff) {
+ buff = malloc(BUFSIZ);
+@@ -76,7 +70,7 @@ struct mntent *getmntent(FILE * filep)
+ }
+
+ tmp = getmntent_r(filep, &mnt, buff, BUFSIZ);
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return(tmp);
+ }
+
+diff --git a/libc/misc/pthread/weaks.c b/libc/misc/pthread/weaks.c
+index 89c2611..c27bd10 100644
+--- a/libc/misc/pthread/weaks.c
++++ b/libc/misc/pthread/weaks.c
+@@ -21,6 +21,7 @@
+ #include <limits.h>
+ #include <stdlib.h>
+
++static void __pthread_return_void __P ((void));
+ static int __pthread_return_0 __P ((void));
+ static int __pthread_return_1 __P ((void));
+
+@@ -104,8 +105,17 @@ weak_alias (__pthread_return_0, __pthrea
+ weak_alias (__pthread_return_0, __pthread_mutex_trylock)
+ weak_alias (__pthread_return_0, __pthread_mutex_unlock)
+
++weak_alias (__pthread_return_void, _pthread_cleanup_push_defer)
++weak_alias (__pthread_return_void, _pthread_cleanup_pop_restore)
++
+ /**********************************************************************/
+
++static void
++__pthread_return_void (void)
++{
++ return;
++}
++
+ static int
+ __pthread_return_0 (void)
+ {
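
The weaks.c hunk is the one that gives this patch its name: _pthread_cleanup_push_defer and _pthread_cleanup_pop_restore get no-op weak aliases so libc code can reference them even when libpthread is not linked. The strong implication is that __UCLIBC_MUTEX_LOCK defers cancellation and pushes an unlock handler around the critical section, so a cancelled thread cannot die holding a libc-internal lock. That is an inference from this hunk, not a quote of bits/uClibc_mutex.h; here is the idea written with the public GNU cleanup macros:

/* Cancellation-safe critical section sketch.  The library presumably uses
 * the internal _pthread_cleanup_push_defer()/_pthread_cleanup_pop_restore()
 * pair stubbed out in weaks.c above; this standalone version uses the
 * public GNU macros instead. */
#define _GNU_SOURCE
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

static void unlock_handler(void *arg)
{
    pthread_mutex_unlock((pthread_mutex_t *) arg);
}

void bump_counter(void)
{
    /* Defer cancellation and register the unlock handler first... */
    pthread_cleanup_push_defer_np(unlock_handler, &demo_lock);
    pthread_mutex_lock(&demo_lock);

    ++shared_counter;   /* if the thread is cancelled here, the handler
                         * still releases demo_lock */

    pthread_mutex_unlock(&demo_lock);
    /* ...then pop the handler without running it and restore the previous
     * cancellation type. */
    pthread_cleanup_pop_restore_np(0);
}
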
+diff --git a/libc/misc/syslog/syslog.c b/libc/misc/syslog/syslog.c
+index 2b478e1..9e9ddbf 100644
+--- a/libc/misc/syslog/syslog.c
++++ b/libc/misc/syslog/syslog.c
+@@ -80,17 +80,9 @@
+ #include <ctype.h>
+ #include <signal.h>
+
++#include <bits/uClibc_mutex.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ static int LogFile = -1; /* fd for log */
+ static int connected; /* have done connect */
+@@ -110,26 +102,26 @@ int setlogmask(int pmask);
+ static void
+ closelog_intern(int to_default)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (LogFile != -1) {
+ (void) close(LogFile);
+ }
+ LogFile = -1;
+ connected = 0;
+ if (to_default)
+- {
+- LogStat = 0;
+- LogTag = "syslog";
+- LogFacility = LOG_USER;
+- LogMask = 0xff;
+- }
+- UNLOCK;
++ {
++ LogStat = 0;
++ LogTag = "syslog";
++ LogFacility = LOG_USER;
++ LogMask = 0xff;
++ }
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ static void
+ sigpipe_handler (int sig)
+ {
+- closelog_intern (0);
++ closelog_intern (0);
+ }
+
+ /*
+@@ -165,7 +157,7 @@ vsyslog( int pri, const char *fmt, va_li
+
+ saved_errno = errno;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ /* See if we should just throw out this message. */
+ if (!(LogMask & LOG_MASK(LOG_PRI(pri))) || (pri &~ (LOG_PRIMASK|LOG_FACMASK)))
+@@ -208,7 +200,7 @@ vsyslog( int pri, const char *fmt, va_li
+ if (p >= end || p < head_end) { /* Returned -1 in case of error... */
+ static const char truncate_msg[12] = "[truncated] ";
+ memmove(head_end + sizeof(truncate_msg), head_end,
+- end - head_end - sizeof(truncate_msg));
++ end - head_end - sizeof(truncate_msg));
+ memcpy(head_end, truncate_msg, sizeof(truncate_msg));
+ if (p < head_end) {
+ while (p < end && *p) {
+@@ -261,11 +253,11 @@ vsyslog( int pri, const char *fmt, va_li
+ (void)close(fd);
+ }
+
+-getout:
+- UNLOCK;
++ getout:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ if (sigpipe == 0)
+ sigaction (SIGPIPE, &oldaction,
+- (struct sigaction *) NULL);
++ (struct sigaction *) NULL);
+ }
+
+ /*
+@@ -276,48 +268,48 @@ openlog( const char *ident, int logstat,
+ {
+ int logType = SOCK_DGRAM;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ if (ident != NULL)
+- LogTag = ident;
++ LogTag = ident;
+ LogStat = logstat;
+ if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0)
+- LogFacility = logfac;
++ LogFacility = logfac;
+ if (LogFile == -1) {
+- SyslogAddr.sa_family = AF_UNIX;
+- (void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
+- sizeof(SyslogAddr.sa_data));
+-retry:
+- if (LogStat & LOG_NDELAY) {
+- if ((LogFile = socket(AF_UNIX, logType, 0)) == -1){
+- UNLOCK;
+- return;
+- }
+- /* fcntl(LogFile, F_SETFD, 1); */
+- }
++ SyslogAddr.sa_family = AF_UNIX;
++ (void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
++ sizeof(SyslogAddr.sa_data));
++ retry:
++ if (LogStat & LOG_NDELAY) {
++ if ((LogFile = socket(AF_UNIX, logType, 0)) == -1){
++ goto DONE;
++ }
++ /* fcntl(LogFile, F_SETFD, 1); */
++ }
+ }
+
+ if (LogFile != -1 && !connected) {
+- if (connect(LogFile, &SyslogAddr, sizeof(SyslogAddr) -
+- sizeof(SyslogAddr.sa_data) + strlen(SyslogAddr.sa_data)) != -1)
+- {
+- connected = 1;
+- } else if (logType == SOCK_DGRAM) {
+- logType = SOCK_STREAM;
+- if (LogFile != -1) {
+- close(LogFile);
+- LogFile = -1;
+- }
+- goto retry;
+- } else {
+- if (LogFile != -1) {
+- close(LogFile);
+- LogFile = -1;
+- }
+- }
++ if (connect(LogFile, &SyslogAddr, sizeof(SyslogAddr) -
++ sizeof(SyslogAddr.sa_data) + strlen(SyslogAddr.sa_data)) != -1)
++ {
++ connected = 1;
++ } else if (logType == SOCK_DGRAM) {
++ logType = SOCK_STREAM;
++ if (LogFile != -1) {
++ close(LogFile);
++ LogFile = -1;
++ }
++ goto retry;
++ } else {
++ if (LogFile != -1) {
++ close(LogFile);
++ LogFile = -1;
++ }
++ }
+ }
+
+- UNLOCK;
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ /*
+@@ -335,10 +327,10 @@ int setlogmask(int pmask)
+ int omask;
+
+ omask = LogMask;
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (pmask != 0)
+- LogMask = pmask;
+- UNLOCK;
++ LogMask = pmask;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return (omask);
+ }
+
+diff --git a/libc/misc/time/time.c b/libc/misc/time/time.c
+index f43bb8a..6165a52 100644
+--- a/libc/misc/time/time.c
++++ b/libc/misc/time/time.c
+@@ -143,6 +143,8 @@
+ #include <locale.h>
+ #include <bits/uClibc_uintmaxtostr.h>
+
++#include <bits/uClibc_mutex.h>
++
+ #ifdef __UCLIBC_HAS_XLOCALE__
+ #include <xlocale.h>
+ #endif
+@@ -191,21 +193,7 @@ typedef struct {
+ char tzname[TZNAME_MAX+1];
+ } rule_struct;
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-
+-#include <pthread.h>
+-
+-extern pthread_mutex_t _time_tzlock;
+-
+-#define TZLOCK __pthread_mutex_lock(&_time_tzlock)
+-#define TZUNLOCK __pthread_mutex_unlock(&_time_tzlock)
+-
+-#else
+-
+-#define TZLOCK ((void) 0)
+-#define TZUNLOCK ((void) 0)
+-
+-#endif
++__UCLIBC_MUTEX_EXTERN(_time_tzlock);
+
+ extern rule_struct _time_tzinfo[2];
+
+@@ -542,13 +530,13 @@ struct tm *localtime(const time_t *timer
+ struct tm *localtime_r(register const time_t *__restrict timer,
+ register struct tm *__restrict result)
+ {
+- TZLOCK;
++ __UCLIBC_MUTEX_LOCK(_time_tzlock);
+
+ tzset();
+
+ __time_localtime_tzi(timer, result, _time_tzinfo);
+
+- TZUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
+
+ return result;
+ }
+@@ -1037,7 +1025,7 @@ size_t __XL(strftime)(char *__restrict s
+ goto LOOP;
+ }
+
+- o = spec + 26; /* set to "????" */
++ o = ((const char *) spec) + 26; /* set to "????" */
+ if ((code & MASK_SPEC) == CALC_SPEC) {
+
+ if (*p == 's') {
+@@ -1073,17 +1061,15 @@ size_t __XL(strftime)(char *__restrict s
+
+ #ifdef __UCLIBC_HAS_TM_EXTENSIONS__
+
+-#define RSP_TZUNLOCK ((void) 0)
+ #define RSP_TZNAME timeptr->tm_zone
+ #define RSP_GMT_OFFSET (-timeptr->tm_gmtoff)
+
+ #else
+
+-#define RSP_TZUNLOCK TZUNLOCK
+ #define RSP_TZNAME rsp->tzname
+ #define RSP_GMT_OFFSET rsp->gmt_offset
+
+- TZLOCK;
++ __UCLIBC_MUTEX_LOCK(_time_tzlock);
+
+ rsp = _time_tzinfo;
+ if (timeptr->tm_isdst > 0) {
+@@ -1114,15 +1100,17 @@ size_t __XL(strftime)(char *__restrict s
+ }
+ #endif
+ o_count = SIZE_MAX;
+- RSP_TZUNLOCK;
++/* RSP_TZUNLOCK; */
++#ifdef __UCLIBC_HAS_TM_EXTENSIONS__
+ goto OUTPUT;
++#endif
+ } else { /* z */
+ *s = '+';
+ if ((tzo = -RSP_GMT_OFFSET) < 0) {
+ tzo = -tzo;
+ *s = '-';
+ }
+- RSP_TZUNLOCK;
++/* RSP_TZUNLOCK; */
+ ++s;
+ --count;
+
+@@ -1131,7 +1119,13 @@ size_t __XL(strftime)(char *__restrict s
+
+ i = 16 + 6; /* 0-fill, width = 4 */
+ }
+-
++#ifdef __UCLIBC_HAS_TM_EXTENSIONS__
++#else
++ __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
++ if (*p == 'Z') {
++ goto OUTPUT;
++ }
++#endif
+ } else {
+ /* TODO: don't need year for U, W */
+ for (i=0 ; i < 3 ; i++) {
+@@ -1664,9 +1658,7 @@ int daylight = 0;
+ long timezone = 0;
+ char *tzname[2] = { (char *) UTC, (char *) (UTC-1) };
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-pthread_mutex_t _time_tzlock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-#endif
++__UCLIBC_MUTEX_INIT(_time_tzlock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ rule_struct _time_tzinfo[2];
+
+@@ -1796,7 +1788,7 @@ void tzset(void)
+ static char oldval[TZ_BUFLEN]; /* BSS-zero'd. */
+ #endif /* __UCLIBC_HAS_TZ_CACHING__ */
+
+- TZLOCK;
++ __UCLIBC_MUTEX_LOCK(_time_tzlock);
+
+ e = getenv(TZ); /* TZ env var always takes precedence. */
+
+@@ -1962,10 +1954,10 @@ void tzset(void)
+ daylight = !!_time_tzinfo[1].tzname[0];
+ timezone = _time_tzinfo[0].gmt_offset;
+
+-#if defined(__UCLIBC_HAS_TZ_FILE__)
++#if defined(__UCLIBC_HAS_TZ_FILE__) || defined(__UCLIBC_HAS_TZ_CACHING__)
+ FAST_DONE:
+ #endif
+- TZUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
+ }
+
+ #endif
+@@ -2167,13 +2159,13 @@ time_t _time_mktime(struct tm *timeptr,
+ {
+ time_t t;
+
+- TZLOCK;
++ __UCLIBC_MUTEX_LOCK(_time_tzlock);
+
+ tzset();
+
+ t = _time_mktime_tzi(timeptr, store_on_success, _time_tzinfo);
+
+- TZUNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
+
+ return t;
+ }
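
One detail worth noting in the time.c conversion: _time_tzlock keeps its PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP initializer, now written through __UCLIBC_MUTEX_INIT, with __UCLIBC_MUTEX_EXTERN on the declaration side. The recursion matters because localtime_r() and _time_mktime() take the lock and then call tzset(), which takes it again. A plain-pthreads sketch of why a non-recursive mutex would deadlock here:

/* Nested acquisition sketch: the outer function locks, then calls a helper
 * that locks the same mutex.  This works only because the mutex is
 * recursive, mirroring the localtime_r() -> tzset() call chain above. */
#define _GNU_SOURCE
#include <pthread.h>

static pthread_mutex_t tzlock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static void tzset_sketch(void)
{
    pthread_mutex_lock(&tzlock);      /* second acquisition, same thread */
    /* ... re-parse TZ and refill the rule tables ... */
    pthread_mutex_unlock(&tzlock);
}

void localtime_sketch(void)
{
    pthread_mutex_lock(&tzlock);      /* first acquisition */
    tzset_sketch();                   /* a normal mutex would deadlock here */
    /* ... convert the time using the rules, still under the lock ... */
    pthread_mutex_unlock(&tzlock);
}
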
+diff --git a/libc/misc/ttyent/getttyent.c b/libc/misc/ttyent/getttyent.c
+index 6e2fbd2..c85c73a 100644
+--- a/libc/misc/ttyent/getttyent.c
++++ b/libc/misc/ttyent/getttyent.c
+@@ -35,9 +35,6 @@
+ #include <ctype.h>
+ #include <string.h>
+ #include <stdlib.h>
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-#endif
+
+ static char zapchar;
+ static FILE *tf;
+@@ -50,8 +47,8 @@ struct ttyent * getttynam(const char *tt
+
+ setttyent();
+ while ((t = getttyent()))
+- if (!strcmp(tty, t->ty_name))
+- break;
++ if (!strcmp(tty, t->ty_name))
++ break;
+ endttyent();
+ return (t);
+ }
+@@ -67,27 +64,27 @@ static char * skip(register char *p)
+ register int c, q;
+
+ for (q = 0, t = p; (c = *p) != '\0'; p++) {
+- if (c == '"') {
+- q ^= QUOTED; /* obscure, but nice */
+- continue;
+- }
+- if (q == QUOTED && *p == '\\' && *(p+1) == '"')
+- p++;
+- *t++ = *p;
+- if (q == QUOTED)
+- continue;
+- if (c == '#') {
+- zapchar = c;
+- *p = 0;
+- break;
+- }
+- if (c == '\t' || c == ' ' || c == '\n') {
+- zapchar = c;
+- *p++ = 0;
+- while ((c = *p) == '\t' || c == ' ' || c == '\n')
+- p++;
+- break;
+- }
++ if (c == '"') {
++ q ^= QUOTED; /* obscure, but nice */
++ continue;
++ }
++ if (q == QUOTED && *p == '\\' && *(p+1) == '"')
++ p++;
++ *t++ = *p;
++ if (q == QUOTED)
++ continue;
++ if (c == '#') {
++ zapchar = c;
++ *p = 0;
++ break;
++ }
++ if (c == '\t' || c == ' ' || c == '\n') {
++ zapchar = c;
++ *p++ = 0;
++ while ((c = *p) == '\t' || c == ' ' || c == '\n')
++ p++;
++ break;
++ }
+ }
+ *--t = '\0';
+ return (p);
+@@ -104,46 +101,46 @@ struct ttyent * getttyent(void)
+ register int c;
+ register char *p;
+ static char *line = NULL;
++ struct ttyent *retval = NULL;
+
+ if (!tf && !setttyent())
+- return (NULL);
++ return (NULL);
+
+ if (!line) {
+- line = malloc(BUFSIZ);
++ line = malloc(BUFSIZ);
+ if (!line)
+ abort();
+ }
+
+- __STDIO_ALWAYS_THREADLOCK(tf);
++ __STDIO_ALWAYS_THREADLOCK(tf);
+
+ for (;;) {
+- if (!fgets_unlocked(p = line, BUFSIZ, tf)) {
+- __STDIO_ALWAYS_THREADUNLOCK(tf);
+- return (NULL);
+- }
+- /* skip lines that are too big */
+- if (!index(p, '\n')) {
+- while ((c = getc_unlocked(tf)) != '\n' && c != EOF)
+- ;
+- continue;
+- }
+- while (isspace(*p))
+- ++p;
+- if (*p && *p != '#')
+- break;
++ if (!fgets_unlocked(p = line, BUFSIZ, tf)) {
++ goto DONE;
++ }
++ /* skip lines that are too big */
++ if (!index(p, '\n')) {
++ while ((c = getc_unlocked(tf)) != '\n' && c != EOF)
++ ;
++ continue;
++ }
++ while (isspace(*p))
++ ++p;
++ if (*p && *p != '#')
++ break;
+ }
+
+ zapchar = 0;
+ tty.ty_name = p;
+ p = skip(p);
+ if (!*(tty.ty_getty = p))
+- tty.ty_getty = tty.ty_type = NULL;
++ tty.ty_getty = tty.ty_type = NULL;
+ else {
+- p = skip(p);
+- if (!*(tty.ty_type = p))
+- tty.ty_type = NULL;
+- else
+- p = skip(p);
++ p = skip(p);
++ if (!*(tty.ty_type = p))
++ tty.ty_type = NULL;
++ else
++ p = skip(p);
+ }
+ tty.ty_status = 0;
+ tty.ty_window = NULL;
+@@ -151,43 +148,45 @@ struct ttyent * getttyent(void)
+ #define scmp(e) !strncmp(p, e, sizeof(e) - 1) && isspace(p[sizeof(e) - 1])
+ #define vcmp(e) !strncmp(p, e, sizeof(e) - 1) && p[sizeof(e) - 1] == '='
+ for (; *p; p = skip(p)) {
+- if (scmp(_TTYS_OFF))
+- tty.ty_status &= ~TTY_ON;
+- else if (scmp(_TTYS_ON))
+- tty.ty_status |= TTY_ON;
+- else if (scmp(_TTYS_SECURE))
+- tty.ty_status |= TTY_SECURE;
+- else if (vcmp(_TTYS_WINDOW))
+- tty.ty_window = value(p);
+- else
+- break;
++ if (scmp(_TTYS_OFF))
++ tty.ty_status &= ~TTY_ON;
++ else if (scmp(_TTYS_ON))
++ tty.ty_status |= TTY_ON;
++ else if (scmp(_TTYS_SECURE))
++ tty.ty_status |= TTY_SECURE;
++ else if (vcmp(_TTYS_WINDOW))
++ tty.ty_window = value(p);
++ else
++ break;
+ }
+- /* We can release the lock only here since `zapchar' is global. */
+- __STDIO_ALWAYS_THREADUNLOCK(tf);
+
+ if (zapchar == '#' || *p == '#')
+- while ((c = *++p) == ' ' || c == '\t')
+- ;
++ while ((c = *++p) == ' ' || c == '\t')
++ ;
+ tty.ty_comment = p;
+ if (*p == 0)
+- tty.ty_comment = 0;
++ tty.ty_comment = 0;
+ if ((p = index(p, '\n')))
+- *p = '\0';
+- return (&tty);
++ *p = '\0';
++ retval = &tty;
++
++ DONE:
++ __STDIO_ALWAYS_THREADUNLOCK(tf);
++ return retval;
+ }
+
+ int setttyent(void)
+ {
+
+ if (tf) {
+- rewind(tf);
+- return (1);
++ rewind(tf);
++ return (1);
+ } else if ((tf = fopen(_PATH_TTYS, "r"))) {
+- /* We do the locking ourselves. */
++ /* We do the locking ourselves. */
+ #ifdef __UCLIBC_HAS_THREADS__
+- __fsetlocking (tf, FSETLOCKING_BYCALLER);
++ __fsetlocking (tf, FSETLOCKING_BYCALLER);
+ #endif
+- return (1);
++ return (1);
+ }
+ return (0);
+ }
+@@ -197,9 +196,9 @@ int endttyent(void)
+ int rval;
+
+ if (tf) {
+- rval = !(fclose(tf) == EOF);
+- tf = NULL;
+- return (rval);
++ rval = !(fclose(tf) == EOF);
++ tf = NULL;
++ return (rval);
+ }
+ return (1);
+ }
+diff --git a/libc/misc/utmp/utent.c b/libc/misc/utmp/utent.c
+index c1d8d6f..0fc6df4 100644
+--- a/libc/misc/utmp/utent.c
++++ b/libc/misc/utmp/utent.c
+@@ -20,19 +20,9 @@
+ #include <string.h>
+ #include <utmp.h>
+
++#include <bits/uClibc_mutex.h>
+
+-
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t utmplock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&utmplock)
+-# define UNLOCK __pthread_mutex_unlock(&utmplock)
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
+-
++__UCLIBC_MUTEX_STATIC(utmplock, PTHREAD_MUTEX_INITIALIZER);
+
+ /* Some global crap */
+ static int static_fd = -1;
+@@ -46,19 +36,19 @@ static struct utmp *__getutent(int utmp_
+
+ {
+ if (utmp_fd == -1) {
+- setutent();
++ setutent();
+ }
+ if (utmp_fd == -1) {
+- return NULL;
++ return NULL;
+ }
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(utmplock);
+ if (read(utmp_fd, (char *) &static_utmp, sizeof(struct utmp)) != sizeof(struct utmp))
+- {
+- return NULL;
+- }
++ {
++ return NULL;
++ }
+
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(utmplock);
+ return &static_utmp;
+ }
+
+@@ -66,39 +56,39 @@ void setutent(void)
+ {
+ int ret;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(utmplock);
+ if (static_fd == -1) {
+- if ((static_fd = open(static_ut_name, O_RDWR)) < 0) {
+- if ((static_fd = open(static_ut_name, O_RDONLY)) < 0) {
+- goto bummer;
+- }
+- }
+- /* Make sure the file will be closed on exec() */
+- ret = fcntl(static_fd, F_GETFD, 0);
+- if (ret >= 0) {
+- ret = fcntl(static_fd, F_GETFD, 0);
+- }
+- if (ret < 0) {
+-bummer:
+- UNLOCK;
+- static_fd = -1;
+- close(static_fd);
+- return;
+- }
++ if ((static_fd = open(static_ut_name, O_RDWR)) < 0) {
++ if ((static_fd = open(static_ut_name, O_RDONLY)) < 0) {
++ goto bummer;
++ }
++ }
++ /* Make sure the file will be closed on exec() */
++ ret = fcntl(static_fd, F_GETFD, 0);
++ if (ret >= 0) {
++ ret = fcntl(static_fd, F_GETFD, 0);
++ }
++ if (ret < 0) {
++ bummer:
++ close(static_fd);
++ static_fd = -1;
++ goto DONE;
++ }
+ }
+ lseek(static_fd, 0, SEEK_SET);
+- UNLOCK;
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(utmplock);
+ return;
+ }
+
+ void endutent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(utmplock);
+ if (static_fd != -1) {
+- close(static_fd);
++ close(static_fd);
+ }
+ static_fd = -1;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(utmplock);
+ }
+
+ /* Locking is done in __getutent */
+@@ -113,22 +103,22 @@ struct utmp *getutid (const struct utmp
+ struct utmp *lutmp;
+
+ while ((lutmp = __getutent(static_fd)) != NULL) {
+- if ( (utmp_entry->ut_type == RUN_LVL ||
+- utmp_entry->ut_type == BOOT_TIME ||
+- utmp_entry->ut_type == NEW_TIME ||
+- utmp_entry->ut_type == OLD_TIME) &&
+- lutmp->ut_type == utmp_entry->ut_type)
+- {
+- return lutmp;
+- }
+- if ( (utmp_entry->ut_type == INIT_PROCESS ||
+- utmp_entry->ut_type == DEAD_PROCESS ||
+- utmp_entry->ut_type == LOGIN_PROCESS ||
+- utmp_entry->ut_type == USER_PROCESS) &&
+- !strncmp(lutmp->ut_id, utmp_entry->ut_id, sizeof(lutmp->ut_id)))
+- {
+- return lutmp;
+- }
++ if ( (utmp_entry->ut_type == RUN_LVL ||
++ utmp_entry->ut_type == BOOT_TIME ||
++ utmp_entry->ut_type == NEW_TIME ||
++ utmp_entry->ut_type == OLD_TIME) &&
++ lutmp->ut_type == utmp_entry->ut_type)
++ {
++ return lutmp;
++ }
++ if ( (utmp_entry->ut_type == INIT_PROCESS ||
++ utmp_entry->ut_type == DEAD_PROCESS ||
++ utmp_entry->ut_type == LOGIN_PROCESS ||
++ utmp_entry->ut_type == USER_PROCESS) &&
++ !strncmp(lutmp->ut_id, utmp_entry->ut_id, sizeof(lutmp->ut_id)))
++ {
++ return lutmp;
++ }
+ }
+
+ return NULL;
+@@ -140,11 +130,11 @@ struct utmp *getutline(const struct utmp
+ struct utmp *lutmp;
+
+ while ((lutmp = __getutent(static_fd)) != NULL) {
+- if ((lutmp->ut_type == USER_PROCESS || lutmp->ut_type == LOGIN_PROCESS) &&
+- !strcmp(lutmp->ut_line, utmp_entry->ut_line))
+- {
+- return lutmp;
+- }
++ if ((lutmp->ut_type == USER_PROCESS || lutmp->ut_type == LOGIN_PROCESS) &&
++ !strcmp(lutmp->ut_line, utmp_entry->ut_line))
++ {
++ return lutmp;
++ }
+ }
+
+ return NULL;
+@@ -152,42 +142,42 @@ struct utmp *getutline(const struct utmp
+
+ struct utmp *pututline (const struct utmp *utmp_entry)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(utmplock);
+ /* Ignore the return value. That way, if they've already positioned
+ the file pointer where they want it, everything will work out. */
+ lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
+
+ if (getutid(utmp_entry) != NULL) {
+- lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
+- if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
+- return NULL;
++ lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
++ if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
++ return NULL;
+ } else {
+- lseek(static_fd, (off_t) 0, SEEK_END);
+- if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
+- return NULL;
++ lseek(static_fd, (off_t) 0, SEEK_END);
++ if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
++ return NULL;
+ }
+
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(utmplock);
+ return (struct utmp *)utmp_entry;
+ }
+
+ int utmpname (const char *new_ut_name)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(utmplock);
+ if (new_ut_name != NULL) {
+- if (static_ut_name != default_file_name)
+- free((char *)static_ut_name);
+- static_ut_name = strdup(new_ut_name);
+- if (static_ut_name == NULL) {
+- /* We should probably whine about out-of-memory
+- * errors here... Instead just reset to the default */
+- static_ut_name = default_file_name;
+- }
++ if (static_ut_name != default_file_name)
++ free((char *)static_ut_name);
++ static_ut_name = strdup(new_ut_name);
++ if (static_ut_name == NULL) {
++ /* We should probably whine about out-of-memory
++ * errors here... Instead just reset to the default */
++ static_ut_name = default_file_name;
++ }
+ }
+
+ if (static_fd != -1)
+- close(static_fd);
+- UNLOCK;
++ close(static_fd);
++ __UCLIBC_MUTEX_UNLOCK(utmplock);
+ return 0;
+ }
+
+diff --git a/libc/misc/wchar/wstdio.c b/libc/misc/wchar/wstdio.c
+index b49494f..408c57a 100644
+--- a/libc/misc/wchar/wstdio.c
++++ b/libc/misc/wchar/wstdio.c
+@@ -82,9 +82,6 @@ strong_alias(NAME,NAME##_unlocked) \
+ void NAME PARAMS
+ #endif
+
+-#define __STDIO_THREADLOCK_OPENLIST
+-#define __STDIO_THREADUNLOCK_OPENLIST
+-
+ #else /* __UCLIBC_HAS_THREADS__ */
+
+ #include <pthread.h>
+@@ -112,15 +109,6 @@ void NAME PARAMS \
+ } \
+ void NAME##_unlocked PARAMS
+
+-#define __STDIO_THREADLOCK_OPENLIST \
+- __pthread_mutex_lock(&_stdio_openlist_lock)
+-
+-#define __STDIO_THREADUNLOCK_OPENLIST \
+- __pthread_mutex_unlock(&_stdio_openlist_lock)
+-
+-#define __STDIO_THREADTRYLOCK_OPENLIST \
+- __pthread_mutex_trylock(&_stdio_openlist_lock)
+-
+ #endif /* __UCLIBC_HAS_THREADS__ */
+
+ #ifndef __STDIO_BUFFERS
+diff --git a/libc/pwd_grp/lckpwdf.c b/libc/pwd_grp/lckpwdf.c
+index 6b9c251..063fed4 100644
+--- a/libc/pwd_grp/lckpwdf.c
++++ b/libc/pwd_grp/lckpwdf.c
+@@ -27,15 +27,9 @@
+ #include <sys/file.h>
+ #include <paths.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ /* How long to wait for getting the lock before returning with an
+ error. */
+@@ -57,18 +51,18 @@ int lckpwdf (void)
+ struct sigaction new_act; /* New signal action. */
+ struct flock fl; /* Information struct for locking. */
+ int result;
++ int rv = -1;
+
+ if (lock_fd != -1)
+ /* Still locked by own process. */
+ return -1;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ lock_fd = open (_PATH_PASSWD, O_WRONLY);
+ if (lock_fd == -1) {
+ /* Cannot create lock file. */
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+
+ /* Make sure file gets correctly closed when process finished. */
+@@ -77,16 +71,14 @@ int lckpwdf (void)
+ /* Cannot get file flags. */
+ close(lock_fd);
+ lock_fd = -1;
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+ flags |= FD_CLOEXEC; /* Close on exit. */
+ if (fcntl (lock_fd, F_SETFD, flags) < 0) {
+ /* Cannot set new flags. */
+ close(lock_fd);
+ lock_fd = -1;
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+
+ /* Now we have to get exclusive write access. Since multiple
+@@ -107,8 +99,7 @@ int lckpwdf (void)
+ /* Cannot install signal handler. */
+ close(lock_fd);
+ lock_fd = -1;
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+
+ /* Now make sure the alarm signal is not blocked. */
+@@ -118,8 +109,7 @@ int lckpwdf (void)
+ sigaction (SIGALRM, &saved_act, NULL);
+ close(lock_fd);
+ lock_fd = -1;
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+
+ /* Start timer. If we cannot get the lock in the specified time we
+@@ -146,12 +136,14 @@ int lckpwdf (void)
+ if (result < 0) {
+ close(lock_fd);
+ lock_fd = -1;
+- UNLOCK;
+- return -1;
++ goto DONE;
+ }
+
+- UNLOCK;
+- return 0;
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
++ return rv;
+ }
+
+
+@@ -164,11 +156,11 @@ int ulckpwdf (void)
+ result = -1;
+ }
+ else {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ result = close (lock_fd);
+ /* Mark descriptor as unused. */
+ lock_fd = -1;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ return result;
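
lckpwdf() shows the shape most of the converted functions take: a single rv variable, every early "UNLOCK; return -1" replaced with goto DONE, and exactly one unlock at the label (setutent() and openlog() above do the same). Keeping the lock and unlock macros lexically paired in one block matters if, as assumed earlier, the lock macro opens a cleanup scope that the unlock macro closes. Plain-pthreads sketch of the shape:

/* Single-exit locking shape used by lckpwdf(), setutent(), openlog(), etc.:
 * every failure path funnels through one label so the unlock appears once. */
#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>

static pthread_mutex_t pw_lock = PTHREAD_MUTEX_INITIALIZER;

int locked_open_sketch(const char *path)
{
    int rv = -1;                       /* pessimistic default */
    int fd;

    pthread_mutex_lock(&pw_lock);

    fd = open(path, O_WRONLY);
    if (fd == -1)
        goto DONE;                     /* error: fall through to the unlock */

    if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
        close(fd);
        goto DONE;
    }

    rv = fd;                           /* success */

 DONE:
    pthread_mutex_unlock(&pw_lock);
    return rv;
}
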
+diff --git a/libc/pwd_grp/pwd_grp.c b/libc/pwd_grp/pwd_grp.c
+index 91c0d83..a302c7c 100644
+--- a/libc/pwd_grp/pwd_grp.c
++++ b/libc/pwd_grp/pwd_grp.c
+@@ -42,9 +42,8 @@
+ #include <pwd.h>
+ #include <grp.h>
+ #include <shadow.h>
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-#endif
++
++#include <bits/uClibc_mutex.h>
+
+ /**********************************************************************/
+ /* Sizes for staticly allocated buffers. */
+@@ -445,34 +444,27 @@ int getpw(uid_t uid, char *buf)
+ /**********************************************************************/
+ #ifdef L_getpwent_r
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK ((void) 0)
+-# define UNLOCK ((void) 0)
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ static FILE *pwf /*= NULL*/;
+
+ void setpwent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (pwf) {
+ rewind(pwf);
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void endpwent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (pwf) {
+ fclose(pwf);
+ pwf = NULL;
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+
+@@ -482,7 +474,7 @@ int getpwent_r(struct passwd *__restrict
+ {
+ int rv;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ *result = NULL; /* In case of error... */
+
+@@ -500,7 +492,7 @@ int getpwent_r(struct passwd *__restrict
+ }
+
+ ERR:
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ return rv;
+ }
+@@ -509,34 +501,27 @@ int getpwent_r(struct passwd *__restrict
+ /**********************************************************************/
+ #ifdef L_getgrent_r
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK ((void) 0)
+-# define UNLOCK ((void) 0)
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ static FILE *grf /*= NULL*/;
+
+ void setgrent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (grf) {
+ rewind(grf);
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void endgrent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (grf) {
+ fclose(grf);
+ grf = NULL;
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ int getgrent_r(struct group *__restrict resultbuf,
+@@ -545,7 +530,7 @@ int getgrent_r(struct group *__restrict
+ {
+ int rv;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ *result = NULL; /* In case of error... */
+
+@@ -563,7 +548,7 @@ int getgrent_r(struct group *__restrict
+ }
+
+ ERR:
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ return rv;
+ }
+@@ -572,34 +557,27 @@ int getgrent_r(struct group *__restrict
+ /**********************************************************************/
+ #ifdef L_getspent_r
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK ((void) 0)
+-# define UNLOCK ((void) 0)
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ static FILE *spf /*= NULL*/;
+
+ void setspent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (spf) {
+ rewind(spf);
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ void endspent(void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (spf) {
+ fclose(spf);
+ spf = NULL;
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+
+ int getspent_r(struct spwd *resultbuf, char *buffer,
+@@ -607,7 +585,7 @@ int getspent_r(struct spwd *resultbuf, c
+ {
+ int rv;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ *result = NULL; /* In case of error... */
+
+@@ -625,7 +603,7 @@ int getspent_r(struct spwd *resultbuf, c
+ }
+
+ ERR:
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ return rv;
+ }
+diff --git a/libc/stdio/_READ.c b/libc/stdio/_READ.c
+index 7d3c38c..fe1bc91 100644
+--- a/libc/stdio/_READ.c
++++ b/libc/stdio/_READ.c
+@@ -41,7 +41,7 @@ size_t __stdio_READ(register FILE *strea
+ #warning EINTR?
+ #endif
+ /* RETRY: */
+- if ((rv = __READ(stream, buf, bufsize)) <= 0) {
++ if ((rv = __READ(stream, (char *) buf, bufsize)) <= 0) {
+ if (rv == 0) {
+ __STDIO_STREAM_SET_EOF(stream);
+ } else {
+diff --git a/libc/stdio/_WRITE.c b/libc/stdio/_WRITE.c
+index d300d39..4131eb7 100644
+--- a/libc/stdio/_WRITE.c
++++ b/libc/stdio/_WRITE.c
+@@ -47,7 +47,7 @@ size_t __stdio_WRITE(register FILE *stre
+ return bufsize;
+ }
+ stodo = (todo <= SSIZE_MAX) ? todo : SSIZE_MAX;
+- if ((rv = __WRITE(stream, buf, stodo)) >= 0) {
++ if ((rv = __WRITE(stream, (char *) buf, stodo)) >= 0) {
+ #ifdef __UCLIBC_MJN3_ONLY__
+ #warning TODO: Make custom stream write return check optional.
+ #endif
+diff --git a/libc/stdio/_fopen.c b/libc/stdio/_fopen.c
+index f7f5bb6..4984f11 100644
+--- a/libc/stdio/_fopen.c
++++ b/libc/stdio/_fopen.c
+@@ -194,10 +194,23 @@ FILE *_stdio_fopen(intptr_t fname_or_mod
+ #endif
+
+ #ifdef __STDIO_HAS_OPENLIST
+- __STDIO_THREADLOCK_OPENLIST;
+- stream->__nextopen = _stdio_openlist; /* New files are inserted at */
+- _stdio_openlist = stream; /* the head of the list. */
+- __STDIO_THREADUNLOCK_OPENLIST;
++#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
++ if (!(stream->__modeflags & __FLAG_FREEFILE))
++ {
++ /* An freopen call so the file was never removed from the list. */
++ }
++ else
++#endif
++ {
++ /* We have to lock the del mutex in case another thread wants to fclose()
++ * the last file. */
++ __STDIO_THREADLOCK_OPENLIST_DEL;
++ __STDIO_THREADLOCK_OPENLIST_ADD;
++ stream->__nextopen = _stdio_openlist; /* New files are inserted at */
++ _stdio_openlist = stream; /* the head of the list. */
++ __STDIO_THREADUNLOCK_OPENLIST_ADD;
++ __STDIO_THREADUNLOCK_OPENLIST_DEL;
++ }
+ #endif
+
+ __STDIO_STREAM_VALIDATE(stream);
+diff --git a/libc/stdio/_stdio.c b/libc/stdio/_stdio.c
+index 4aae3c4..9cfe02c 100644
+--- a/libc/stdio/_stdio.c
++++ b/libc/stdio/_stdio.c
+@@ -151,8 +151,12 @@ FILE *__stdout = _stdio_streams + 1; /*
+ FILE *_stdio_openlist = _stdio_streams;
+
+ # ifdef __UCLIBC_HAS_THREADS__
+-pthread_mutex_t _stdio_openlist_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-int _stdio_openlist_delflag = 0;
++__UCLIBC_MUTEX_INIT(_stdio_openlist_add_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
++#ifdef __STDIO_BUFFERS
++__UCLIBC_MUTEX_INIT(_stdio_openlist_del_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
++volatile int _stdio_openlist_use_count = 0;
++int _stdio_openlist_del_count = 0;
++#endif
+ # endif
+
+ #endif
+@@ -162,10 +166,10 @@ int _stdio_openlist_delflag = 0;
+ /* 2 if threading not initialized and 0 otherwise; */
+ int _stdio_user_locking = 2;
+
+-void __stdio_init_mutex(pthread_mutex_t *m)
++void __stdio_init_mutex(__UCLIBC_MUTEX_TYPE *m)
+ {
+- static const pthread_mutex_t __stdio_mutex_initializer
+- = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
++ const __UCLIBC_MUTEX_STATIC(__stdio_mutex_initializer,
++ PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ memcpy(m, &__stdio_mutex_initializer, sizeof(__stdio_mutex_initializer));
+ }
+@@ -184,7 +188,11 @@ void _stdio_term(void)
+ * locked, then I suppose there is a chance that a pointer in the
+ * chain might be corrupt due to a partial store.
+ */
+- __stdio_init_mutex(&_stdio_openlist_lock);
++ __stdio_init_mutex(&_stdio_openlist_add_lock);
++#warning check
++#ifdef __STDIO_BUFFERS
++ __stdio_init_mutex(&_stdio_openlist_del_lock);
++#endif
+
+ /* Next we need to worry about the streams themselves. If a stream
+ * is currently locked, then it may be in an invalid state. So we
+@@ -192,7 +200,7 @@ void _stdio_term(void)
+ * Then we reinitialize the locks.
+ */
+ for (ptr = _stdio_openlist ; ptr ; ptr = ptr->__nextopen ) {
+- if (__STDIO_ALWAYS_THREADTRYLOCK(ptr)) {
++ if (__STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(ptr)) {
+ /* The stream is already locked, so we don't want to touch it.
+ * However, if we have custom streams, we can't just close it
+ * or leave it locked since a custom stream may be stacked
+@@ -258,10 +266,6 @@ void _stdio_init(void)
+ #error Assumption violated about __MASK_READING and __FLAG_UNGOT
+ #endif
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-#endif
+-
+ #ifndef NDEBUG
+
+ void _stdio_validate_FILE(const FILE *stream)
+diff --git a/libc/stdio/_stdio.h b/libc/stdio/_stdio.h
+index e3c2c58..decf57d 100644
+--- a/libc/stdio/_stdio.h
++++ b/libc/stdio/_stdio.h
+@@ -22,23 +22,57 @@
+ #include <wchar.h>
+ #endif
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
++#include <bits/uClibc_mutex.h>
+
+-#define __STDIO_THREADLOCK_OPENLIST \
+- __pthread_mutex_lock(&_stdio_openlist_lock)
++#define __STDIO_THREADLOCK_OPENLIST_ADD \
++ __UCLIBC_MUTEX_LOCK(_stdio_openlist_add_lock)
+
+-#define __STDIO_THREADUNLOCK_OPENLIST \
+- __pthread_mutex_unlock(&_stdio_openlist_lock)
++#define __STDIO_THREADUNLOCK_OPENLIST_ADD \
++ __UCLIBC_MUTEX_UNLOCK(_stdio_openlist_add_lock)
+
+-#define __STDIO_THREADTRYLOCK_OPENLIST \
+- __pthread_mutex_trylock(&_stdio_openlist_lock)
++#ifdef __STDIO_BUFFERS
+
+-#else
++#define __STDIO_THREADLOCK_OPENLIST_DEL \
++ __UCLIBC_MUTEX_LOCK(_stdio_openlist_del_lock)
++
++#define __STDIO_THREADUNLOCK_OPENLIST_DEL \
++ __UCLIBC_MUTEX_UNLOCK(_stdio_openlist_del_lock)
+
+-#define __STDIO_THREADLOCK_OPENLIST ((void)0)
+-#define __STDIO_THREADUNLOCK_OPENLIST ((void)0)
++#define __STDIO_OPENLIST_INC_USE \
++do { \
++ __STDIO_THREADLOCK_OPENLIST_DEL; \
++ ++_stdio_openlist_use_count; \
++ __STDIO_THREADUNLOCK_OPENLIST_DEL; \
++} while (0)
++
++extern void _stdio_openlist_dec_use(void);
++
++#define __STDIO_OPENLIST_DEC_USE \
++ _stdio_openlist_dec_use()
++
++#define __STDIO_OPENLIST_INC_DEL_CNT \
++do { \
++ __STDIO_THREADLOCK_OPENLIST_DEL; \
++ ++_stdio_openlist_del_count; \
++ __STDIO_THREADUNLOCK_OPENLIST_DEL; \
++} while (0)
++
++#define __STDIO_OPENLIST_DEC_DEL_CNT \
++do { \
++ __STDIO_THREADLOCK_OPENLIST_DEL; \
++ --_stdio_openlist_del_count; \
++ __STDIO_THREADUNLOCK_OPENLIST_DEL; \
++} while (0)
++
++#endif /* __STDIO_BUFFERS */
+
++#ifndef __STDIO_THREADLOCK_OPENLIST_DEL
++#define __STDIO_THREADLOCK_OPENLIST_DEL ((void)0)
++#define __STDIO_THREADUNLOCK_OPENLIST_DEL ((void)0)
++#define __STDIO_OPENLIST_INC_USE ((void)0)
++#define __STDIO_OPENLIST_DEC_USE ((void)0)
++#define __STDIO_OPENLIST_INC_DEL_CNT ((void)0)
++#define __STDIO_OPENLIST_DEC_DEL_CNT ((void)0)
+ #endif
+
+ #define __UNDEFINED_OR_NONPORTABLE ((void)0)
+diff --git a/libc/stdio/fclose.c b/libc/stdio/fclose.c
+index 4df2e42..dfababc 100644
+--- a/libc/stdio/fclose.c
++++ b/libc/stdio/fclose.c
+@@ -12,30 +12,34 @@ int fclose(register FILE *stream)
+ int rv = 0;
+ __STDIO_AUTO_THREADLOCK_VAR;
+
+- /* First, remove the file from the open file list. */
+-#ifdef __STDIO_HAS_OPENLIST
+- {
+- register FILE *ptr;
+-
+- __STDIO_THREADLOCK_OPENLIST;
+- if ((ptr = _stdio_openlist) == stream) {
+- _stdio_openlist = stream->__nextopen;
+- } else {
+- while (ptr) {
+- if (ptr->__nextopen == stream) {
+- ptr->__nextopen = stream->__nextopen;
+- break;
+- }
+- ptr = ptr->__nextopen;
+- }
+- }
+- __STDIO_THREADUNLOCK_OPENLIST;
+-
+- if (!ptr) { /* Did not find stream in the open file list! */
+- return EOF;
+- }
+- }
+-#endif
++#warning dead code... but may want to simply check and not remove
++/* #ifdef __STDIO_HAS_OPENLIST */
++/* #if !defined(__UCLIBC_HAS_THREADS__) || !defined(__STDIO_BUFFERS) */
++/* /\* First, remove the file from the open file list. *\/ */
++/* { */
++/* register FILE *ptr; */
++
++/* __STDIO_THREADLOCK_OPENLIST; */
++/* if ((ptr = _stdio_openlist) == stream) { */
++/* #warning does a mod!!! */
++/* _stdio_openlist = stream->__nextopen; */
++/* } else { */
++/* while (ptr) { */
++/* if (ptr->__nextopen == stream) { */
++/* ptr->__nextopen = stream->__nextopen; */
++/* break; */
++/* } */
++/* ptr = ptr->__nextopen; */
++/* } */
++/* } */
++/* __STDIO_THREADUNLOCK_OPENLIST; */
++
++/* if (!ptr) { /\* Did not find stream in the open file list! *\/ */
++/* return EOF; */
++/* } */
++/* } */
++/* #endif */
++/* #endif */
+
+ __STDIO_AUTO_THREADLOCK(stream);
+
+@@ -80,7 +84,15 @@ int fclose(register FILE *stream)
+ __STDIO_AUTO_THREADUNLOCK(stream);
+
+ __STDIO_STREAM_FREE_BUFFER(stream);
++#warning... inefficient - locks and unlocks twice and walks whole list
++#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
++ /* inefficient - locks/unlocks twice and walks whole list */
++ __STDIO_OPENLIST_INC_USE;
++ __STDIO_OPENLIST_INC_DEL_CNT;
++ __STDIO_OPENLIST_DEC_USE; /* This with free the file if necessary. */
++#else
+ __STDIO_STREAM_FREE_FILE(stream);
++#endif
+
+ return rv;
+ }
+diff --git a/libc/stdio/fcloseall.c b/libc/stdio/fcloseall.c
+index dbb6000..f62281a 100644
+--- a/libc/stdio/fcloseall.c
++++ b/libc/stdio/fcloseall.c
+@@ -19,14 +19,34 @@ int fcloseall (void)
+ #ifdef __STDIO_HAS_OPENLIST
+
+ int retval = 0;
++ FILE *f;
+
+- __STDIO_THREADLOCK_OPENLIST;
+- while (_stdio_openlist) {
+- if (fclose(_stdio_openlist)) {
++#warning remove dead code
++/* __STDIO_THREADLOCK_OPENLIST; */
++/* while (_stdio_openlist) { */
++/* if (fclose(_stdio_openlist)) { */
++/* retval = EOF; */
++/* } */
++/* } */
++/* __STDIO_THREADUNLOCK_OPENLIST; */
++
++ __STDIO_OPENLIST_INC_USE;
++
++#warning should probably have a get_head() operation
++ __STDIO_THREADLOCK_OPENLIST_ADD;
++ f = _stdio_openlist;
++ __STDIO_THREADUNLOCK_OPENLIST_ADD;
++
++ while (f) {
++#warning should probably have a get_next() operation
++ FILE *n = f->__nextopen;
++ if (fclose(f)) {
+ retval = EOF;
+ }
++ f = n;
+ }
+- __STDIO_THREADUNLOCK_OPENLIST;
++
++ __STDIO_OPENLIST_DEC_USE;
+
+ return retval;
+
+diff --git a/libc/stdio/fflush.c b/libc/stdio/fflush.c
+index 6baa0ec..66b65cd 100644
+--- a/libc/stdio/fflush.c
++++ b/libc/stdio/fflush.c
+@@ -20,23 +20,50 @@ weak_alias(__fflush_unlocked,fflush_unlo
+ weak_alias(__fflush_unlocked,fflush);
+ #endif
+
+-#ifdef __UCLIBC_HAS_THREADS__
+ /* Even if the stream is set to user-locking, we still need to lock
+ * when all (lbf) writing streams are flushed. */
+-#define MY_STDIO_THREADLOCK(STREAM) \
+- if (_stdio_user_locking != 2) { \
+- __STDIO_ALWAYS_THREADLOCK(STREAM); \
+- }
+
+-#define MY_STDIO_THREADUNLOCK(STREAM) \
+- if (_stdio_user_locking != 2) { \
+- __STDIO_ALWAYS_THREADUNLOCK(STREAM); \
+- }
+-#else
+-#define MY_STDIO_THREADLOCK(STREAM) ((void)0)
+-#define MY_STDIO_THREADUNLOCK(STREAM) ((void)0)
+-#endif
++#define __MY_STDIO_THREADLOCK(__stream) \
++ __UCLIBC_MUTEX_CONDITIONAL_LOCK((__stream)->__lock, \
++ (_stdio_user_locking != 2))
++
++#define __MY_STDIO_THREADUNLOCK(__stream) \
++ __UCLIBC_MUTEX_CONDITIONAL_UNLOCK((__stream)->__lock, \
++ (_stdio_user_locking != 2))
+
++#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
++void _stdio_openlist_dec_use(void)
++{
++ __STDIO_THREADLOCK_OPENLIST_DEL;
++ if ((_stdio_openlist_use_count == 1) && (_stdio_openlist_del_count > 0)) {
++ FILE *p = NULL;
++ FILE *n;
++ FILE *stream;
++
++ __STDIO_THREADLOCK_OPENLIST_ADD;
++ for (stream = _stdio_openlist; stream; stream = n) {
++#warning walk the list and clear out all fclosed()d files
++ n = stream->__nextopen;
++#warning fix for nonatomic
++ if ((stream->__modeflags & (__FLAG_READONLY|__FLAG_WRITEONLY))
++ == (__FLAG_READONLY|__FLAG_WRITEONLY)
++ ) { /* The file was closed so remove from the list. */
++ if (!p) {
++ _stdio_openlist = n;
++ } else {
++ p->__nextopen = n;
++ }
++ __STDIO_STREAM_FREE_FILE(stream);
++ } else {
++ p = stream;
++ }
++ }
++ __STDIO_THREADUNLOCK_OPENLIST_DEL;
++ }
++ --_stdio_openlist_use_count;
++ __STDIO_THREADUNLOCK_OPENLIST_DEL;
++}
++#endif
+
+ int __fflush_unlocked(register FILE *stream)
+ {
+@@ -60,23 +87,39 @@ int __fflush_unlocked(register FILE *str
+ }
+
+ if (!stream) { /* Flush all (lbf) writing streams. */
+- __STDIO_THREADLOCK_OPENLIST;
+- for (stream = _stdio_openlist; stream ; stream = stream->__nextopen) {
+- MY_STDIO_THREADLOCK(stream);
+- if (!(((stream->__modeflags | bufmask)
+- ^ (__FLAG_WRITING|__FLAG_LBF)
+- ) & (__FLAG_WRITING|__MASK_BUFMODE))
+- ) {
+- if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
+- __STDIO_STREAM_DISABLE_PUTC(stream);
+- __STDIO_STREAM_CLEAR_WRITING(stream);
+- } else {
+- retval = EOF;
++
++ __STDIO_OPENLIST_INC_USE;
++
++ __STDIO_THREADLOCK_OPENLIST_ADD;
++ stream = _stdio_openlist;
++ __STDIO_THREADUNLOCK_OPENLIST_ADD;
++
++ while(stream) {
++ /* We only care about currently writing streams and do not want to
++ * block trying to obtain mutexes on non-writing streams. */
++#warning fix for nonatomic
++#warning unnecessary check if no threads
++ if (__STDIO_STREAM_IS_WRITING(stream)) { /* ONLY IF ATOMIC!!! */
++ __MY_STDIO_THREADLOCK(stream);
++ /* Need to check again once we have the lock. */
++ if (!(((stream->__modeflags | bufmask)
++ ^ (__FLAG_WRITING|__FLAG_LBF)
++ ) & (__FLAG_WRITING|__MASK_BUFMODE))
++ ) {
++ if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
++ __STDIO_STREAM_DISABLE_PUTC(stream);
++ __STDIO_STREAM_CLEAR_WRITING(stream);
++ } else {
++ retval = EOF;
++ }
+ }
++ __MY_STDIO_THREADUNLOCK(stream);
+ }
+- MY_STDIO_THREADUNLOCK(stream);
++ stream = stream->__nextopen;
+ }
+- __STDIO_THREADUNLOCK_OPENLIST;
++
++ __STDIO_OPENLIST_DEC_USE;
++
+ } else if (__STDIO_STREAM_IS_WRITING(stream)) {
+ if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
+ __STDIO_STREAM_DISABLE_PUTC(stream);
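
The stdio changes are the largest piece. _stdio.h replaces the single open-list lock with an "add" lock, a "del" lock, a use count and a delete count; fclose() above no longer unlinks the stream but marks it and bumps the delete count, and _stdio_openlist_dec_use() here sweeps marked streams out of the list when the last walker drops its use count. The sketch below spells out that apparent protocol with an explicit closed flag; the real code reuses the __FLAG_READONLY|__FLAG_WRITEONLY combination as the marker and still carries #warning reminders, so treat this as the intent rather than the letter of the implementation.

/* Deferred-removal sketch for the open-file list (illustrative only:
 * explicit flags and plain pthreads, not the real FILE bookkeeping).
 * Walkers hold a use count instead of keeping the list locked; fclose()
 * only marks the node; the last walker out performs the unlinking. */
#include <pthread.h>
#include <stdlib.h>

struct node {
    struct node *next;
    int closed;                            /* set by the "fclose" side */
};

static struct node *openlist;
static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t del_lock = PTHREAD_MUTEX_INITIALIZER;
static int use_count;
static int del_count;

void openlist_inc_use(void)                /* ~ __STDIO_OPENLIST_INC_USE */
{
    pthread_mutex_lock(&del_lock);
    ++use_count;
    pthread_mutex_unlock(&del_lock);
}

void openlist_mark_closed(struct node *s)  /* ~ fclose() marking a stream */
{
    pthread_mutex_lock(&del_lock);
    s->closed = 1;
    ++del_count;
    pthread_mutex_unlock(&del_lock);
}

void openlist_dec_use(void)                /* ~ _stdio_openlist_dec_use() */
{
    pthread_mutex_lock(&del_lock);
    if (use_count == 1 && del_count > 0) { /* last walker: sweep the list */
        struct node *prev = NULL, *next, *s;

        pthread_mutex_lock(&add_lock);
        for (s = openlist; s; s = next) {
            next = s->next;
            if (s->closed) {
                if (prev)
                    prev->next = next;
                else
                    openlist = next;
                free(s);
                --del_count;
            } else {
                prev = s;
            }
        }
        pthread_mutex_unlock(&add_lock);
    }
    --use_count;
    pthread_mutex_unlock(&del_lock);
}
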
+diff --git a/libc/stdio/flockfile.c b/libc/stdio/flockfile.c
+index 0dcc7c2..3fad711 100644
+--- a/libc/stdio/flockfile.c
++++ b/libc/stdio/flockfile.c
+@@ -11,6 +11,6 @@ void flockfile(FILE *stream)
+ {
+ __STDIO_STREAM_VALIDATE(stream);
+
+- __STDIO_ALWAYS_THREADLOCK(stream);
++ __STDIO_ALWAYS_THREADLOCK_CANCEL_UNSAFE(stream);
+ }
+
+diff --git a/libc/stdio/freopen.c b/libc/stdio/freopen.c
+index 0eccaac..36b8488 100644
+--- a/libc/stdio/freopen.c
++++ b/libc/stdio/freopen.c
+@@ -42,6 +42,8 @@ FILE *freopen(const char * __restrict fi
+
+ __STDIO_STREAM_VALIDATE(stream);
+
++ __STDIO_OPENLIST_INC_USE; /* Do not remove the file from the list. */
++
+ /* First, flush and close, but don't deallocate, the stream. */
+ /* This also removes the stream for the open file list. */
+ dynmode = (stream->__modeflags & (__FLAG_FREEBUF|__FLAG_FREEFILE));
+@@ -57,9 +59,16 @@ FILE *freopen(const char * __restrict fi
+
+ fp = _stdio_fopen(((intptr_t) filename), mode, stream, FILEDES_ARG);
+
++#warning if fp is NULL, then we do not free file (but beware stdin,stdout,stderr)
++ if (fp) {
++ __STDIO_OPENLIST_DEC_DEL_CNT;
++ }
++
+ /* Reset the allocation flags. */
+ stream->__modeflags |= dynmode;
+
++ __STDIO_OPENLIST_DEC_USE;
++
+ __STDIO_AUTO_THREADUNLOCK(stream);
+
+ return fp;
+diff --git a/libc/stdio/ftello.c b/libc/stdio/ftello.c
+index 7092f34..69385ce 100644
+--- a/libc/stdio/ftello.c
++++ b/libc/stdio/ftello.c
+@@ -48,7 +48,10 @@ OFFSET_TYPE FTELL(register FILE *stream)
+
+ __STDIO_STREAM_VALIDATE(stream);
+
+- if ((__SEEK(stream, &pos, SEEK_CUR) < 0)
++ if ((__SEEK(stream, &pos,
++ ((__STDIO_STREAM_IS_WRITING(stream)
++ && (stream->__modeflags & __FLAG_APPEND))
++ ? SEEK_END : SEEK_CUR)) < 0)
+ || (__stdio_adjust_position(stream, &pos) < 0)) {
+ pos = -1;
+ }
+diff --git a/libc/stdio/ftrylockfile.c b/libc/stdio/ftrylockfile.c
+index d85b8ff..0d2e156 100644
+--- a/libc/stdio/ftrylockfile.c
++++ b/libc/stdio/ftrylockfile.c
+@@ -15,5 +15,5 @@ int ftrylockfile(FILE *stream)
+ {
+ __STDIO_STREAM_VALIDATE(stream);
+
+- return __STDIO_ALWAYS_THREADTRYLOCK(stream);
++ return __STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(stream);
+ }
+diff --git a/libc/stdio/funlockfile.c b/libc/stdio/funlockfile.c
+index 048c093..2ddf097 100644
+--- a/libc/stdio/funlockfile.c
++++ b/libc/stdio/funlockfile.c
+@@ -11,5 +11,5 @@ void funlockfile(FILE *stream)
+ {
+ __STDIO_STREAM_VALIDATE(stream);
+
+- __STDIO_ALWAYS_THREADUNLOCK(stream);
++ __STDIO_ALWAYS_THREADUNLOCK_CANCEL_UNSAFE(stream);
+ }
+diff --git a/libc/stdio/popen.c b/libc/stdio/popen.c
+index c7887ad..ab8d296 100644
+--- a/libc/stdio/popen.c
++++ b/libc/stdio/popen.c
+@@ -14,6 +14,7 @@
+ * Fix failure exit code for failed execve().
+ */
+
++#warning hmm... susv3 says "Pipe streams are byte-oriented."
+
+ #include <stdio.h>
+ #include <stdlib.h>
+@@ -21,6 +22,8 @@
+ #include <unistd.h>
+ #include <sys/wait.h>
+
++#include <bits/uClibc_mutex.h>
++
+ /* uClinux-2.0 has vfork, but Linux 2.0 doesn't */
+ #include <sys/syscall.h>
+ #if ! defined __NR_vfork
+@@ -29,19 +32,11 @@
+ # define VFORK_UNLOCK ((void) 0)
+ #endif
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK ((void) 0)
+-# define UNLOCK ((void) 0)
+-#endif
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ #ifndef VFORK_LOCK
+-# define VFORK_LOCK LOCK
+-# define VFORK_UNLOCK UNLOCK
++# define VFORK_LOCK __UCLIBC_MUTEX_LOCK(mylock)
++# define VFORK_UNLOCK __UCLIBC_MUTEX_UNLOCK(mylock)
+ #endif
+
+ struct popen_list_item {
+@@ -118,10 +113,10 @@ FILE *popen(const char *command, const c
+ if (pid > 0) { /* Parent of vfork... */
+ pi->pid = pid;
+ pi->f = fp;
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ pi->next = popen_list;
+ popen_list = pi;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ return fp;
+ }
+@@ -136,6 +131,8 @@ FILE *popen(const char *command, const c
+ return NULL;
+ }
+
++#warning is pclose correct wrt the new mutex semantics?
++
+ int pclose(FILE *stream)
+ {
+ struct popen_list_item *p;
+@@ -144,7 +141,7 @@ int pclose(FILE *stream)
+
+ /* First, find the list entry corresponding to stream and remove it
+ * from the list. Set p to the list item (NULL if not found). */
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if ((p = popen_list) != NULL) {
+ if (p->f == stream) {
+ popen_list = p->next;
+@@ -163,7 +160,7 @@ int pclose(FILE *stream)
+ } while (1);
+ }
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+
+ if (p) {
+ pid = p->pid; /* Save the pid we need */
+diff --git a/libc/stdio/setvbuf.c b/libc/stdio/setvbuf.c
+index 3fe62c6..6d53ab1 100644
+--- a/libc/stdio/setvbuf.c
++++ b/libc/stdio/setvbuf.c
+@@ -75,8 +75,8 @@ int setvbuf(register FILE * __restrict s
+ }
+
+ stream->__modeflags |= alloc_flag;
+- stream->__bufstart = buf;
+- stream->__bufend = buf + size;
++ stream->__bufstart = (unsigned char *) buf;
++ stream->__bufend = (unsigned char *) buf + size;
+ __STDIO_STREAM_INIT_BUFREAD_BUFPOS(stream);
+ __STDIO_STREAM_DISABLE_GETC(stream);
+ __STDIO_STREAM_DISABLE_PUTC(stream);
+diff --git a/libc/stdio/vasprintf.c b/libc/stdio/vasprintf.c
+index 688ab7c..6d7664d 100644
+--- a/libc/stdio/vasprintf.c
++++ b/libc/stdio/vasprintf.c
+@@ -63,6 +63,8 @@ int vasprintf(char **__restrict buf, con
+ free(*buf);
+ *buf = NULL;
+ }
++ } else {
++ rv = -1;
+ }
+ }
+
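
The vasprintf() hunk above adds an explicit rv = -1 on a failure branch (the surrounding context is not shown here), so the function reports an error rather than a stale count. Callers can then rely on the usual rule that *buf is only meaningful when the return value is non-negative, as in this small usage sketch; vasprintf() is a GNU extension, hence _GNU_SOURCE, and xasprintf is a hypothetical wrapper.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

/* Return a freshly allocated formatted string, or NULL on failure. */
static char *xasprintf(const char *fmt, ...)
{
    va_list ap;
    char *buf;
    int rv;

    va_start(ap, fmt);
    rv = vasprintf(&buf, fmt, ap);
    va_end(ap);
    return (rv < 0) ? NULL : buf;       /* only trust buf when rv >= 0 */
}

int main(void)
{
    char *s = xasprintf("%d + %d = %d", 2, 2, 4);
    if (s) {
        puts(s);
        free(s);
    }
    return 0;
}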
+diff --git a/libc/stdio/vdprintf.c b/libc/stdio/vdprintf.c
+index de8362c..7cb707f 100644
+--- a/libc/stdio/vdprintf.c
++++ b/libc/stdio/vdprintf.c
+@@ -15,8 +15,8 @@ int vdprintf(int filedes, const char * _
+ #ifdef __STDIO_BUFFERS
+ char buf[64]; /* TODO: provide _optional_ buffering? */
+
+- f.__bufend = buf + sizeof(buf);
+- f.__bufstart = buf;
++ f.__bufend = (unsigned char *) buf + sizeof(buf);
++ f.__bufstart = (unsigned char *) buf;
+ __STDIO_STREAM_DISABLE_GETC(&f);
+ __STDIO_STREAM_DISABLE_PUTC(&f);
+ __STDIO_STREAM_INIT_BUFREAD_BUFPOS(&f);
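
The vdprintf() hunk shows its implementation strategy: a temporary FILE set up over a small on-stack buffer, writing straight to a file descriptor. For reference, typical caller-side usage looks like the sketch below; log_to_fd is a hypothetical wrapper.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdarg.h>
#include <unistd.h>

/* Format directly to a file descriptor, bypassing any FILE stream. */
static int log_to_fd(int fd, const char *fmt, ...)
{
    va_list ap;
    int n;

    va_start(ap, fmt);
    n = vdprintf(fd, fmt, ap);          /* bytes written, or negative on error */
    va_end(ap);
    return n;
}

int main(void)
{
    log_to_fd(STDOUT_FILENO, "pid=%ld\n", (long)getpid());
    return 0;
}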
+diff --git a/libc/stdio/vfprintf.c b/libc/stdio/vfprintf.c
+index 10114f0..9214e3b 100644
+--- a/libc/stdio/vfprintf.c
++++ b/libc/stdio/vfprintf.c
+@@ -569,7 +569,7 @@ int _ppfs_init(register ppfs_t *ppfs, co
+ ppfs->fmtpos = fmt0; /* rewind */
+ }
+
+-#ifdef NL_MAX_ARG
++#ifdef NL_ARGMAX
+ /* If we have positional args, make sure we know all the types. */
+ {
+ register int *p = ppfs->argtype;
+@@ -581,7 +581,7 @@ int _ppfs_init(register ppfs_t *ppfs, co
+ ++p;
+ }
+ }
+-#endif /* NL_MAX_ARG */
++#endif /* NL_ARGMAX */
+
+ return 0;
+ }
+@@ -1214,7 +1214,7 @@ static size_t _fp_out_narrow(FILE *fp, i
+ }
+ len = buflen;
+ }
+- return r + OUTNSTR(fp, (const char *) buf, len);
++ return r + OUTNSTR(fp, (const unsigned char *) buf, len);
+ }
+
+ #endif /* __STDIO_PRINTF_FLOAT */
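
The vfprintf() hunk fixes the guard around the positional-argument check: NL_ARGMAX is the POSIX macro (from <limits.h>) that bounds %n$ conversions, while the previously tested NL_MAX_ARG is not a standard name, so the block was most likely never compiled in. A short example of the positional conversions that code validates:

#include <stdio.h>
#include <limits.h>

int main(void)
{
#ifdef NL_ARGMAX
    printf("up to %d positional args supported\n", NL_ARGMAX);
#endif
    /* Same arguments, consumed in a different order via %n$. */
    printf("%2$s, %1$s!\n", "world", "hello");   /* prints "hello, world!" */
    return 0;
}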
+diff --git a/libc/stdlib/abort.c b/libc/stdlib/abort.c
+index 77c2cdc..9f69918 100644
+--- a/libc/stdlib/abort.c
++++ b/libc/stdlib/abort.c
+@@ -70,16 +70,9 @@ extern void _exit __P((int __status)) __
+ static int been_there_done_that = 0;
+
+ /* Be prepared in case multiple threads try to abort() */
+-#ifdef __UCLIBC_HAS_THREADS__
+-# include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock)
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++#include <bits/uClibc_mutex.h>
+
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ /* Cause an abnormal program termination with core-dump */
+ void abort(void)
+@@ -87,7 +80,7 @@ void abort(void)
+ sigset_t sigset;
+
+ /* Make sure we acquire the lock before proceeding */
+- LOCK;
++ __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(mylock);
+
+ /* Unmask SIGABRT to be sure we can get it */
+ if (__sigemptyset(&sigset) == 0 && __sigaddset(&sigset, SIGABRT) == 0) {
+@@ -110,9 +103,9 @@ void abort(void)
+ #endif
+
+ abort_it:
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(mylock);
+ raise(SIGABRT);
+- LOCK;
++ __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(mylock);
+ }
+
+ /* Still here? Try to remove any signal handlers */
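
The abort() hunk keeps a recursive mutex, switches to the *_CANCEL_UNSAFE lock forms, and deliberately drops the lock around raise(SIGABRT) before re-acquiring it. The recursive type matters because the same thread can end up back inside abort() (for example via a SIGABRT handler) while it still owns the lock. A minimal sketch of that property, using the same non-portable static initializer:

/* With a recursive mutex the owning thread may lock again instead of
 * deadlocking; the lock is only released when the count drops to zero. */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static void inner(void)
{
    pthread_mutex_lock(&lk);            /* same owner: lock count becomes 2 */
    puts("re-entered critical section");
    pthread_mutex_unlock(&lk);
}

int main(void)
{
    pthread_mutex_lock(&lk);
    inner();                            /* would deadlock with a normal mutex */
    pthread_mutex_unlock(&lk);
    return 0;
}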
+diff --git a/libc/stdlib/atexit.c b/libc/stdlib/atexit.c
+index 280f42c..b028068 100644
+--- a/libc/stdlib/atexit.c
++++ b/libc/stdlib/atexit.c
+@@ -40,17 +40,9 @@
+ #include <stdlib.h>
+ #include <errno.h>
+
++#include <bits/uClibc_mutex.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-extern pthread_mutex_t mylock;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
+-
++__UCLIBC_MUTEX_EXTERN(__atexit_lock);
+
+ typedef void (*aefuncp) (void); /* atexit function pointer */
+ typedef void (*oefuncp) (int, void *); /* on_exit function pointer */
+@@ -90,8 +82,9 @@ extern struct exit_function __exit_funct
+ int atexit(aefuncp func)
+ {
+ struct exit_function *efp;
++ int rv = -1;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(__atexit_lock);
+ if (func) {
+ #ifdef __UCLIBC_DYNAMIC_ATEXIT__
+ /* If we are out of function table slots, make some more */
+@@ -99,18 +92,16 @@ int atexit(aefuncp func)
+ efp=realloc(__exit_function_table,
+ (__exit_slots+20)*sizeof(struct exit_function));
+ if (efp==NULL) {
+- UNLOCK;
+ __set_errno(ENOMEM);
+- return -1;
++ goto DONE;
+ }
+ __exit_function_table = efp;
+ __exit_slots+=20;
+ }
+ #else
+ if (__exit_count >= __UCLIBC_MAX_ATEXIT) {
+- UNLOCK;
+ __set_errno(ENOMEM);
+- return -1;
++ goto DONE;
+ }
+ #endif
+ __exit_cleanup = __exit_handler; /* enable cleanup */
+@@ -118,8 +109,12 @@ int atexit(aefuncp func)
+ efp->type = ef_atexit;
+ efp->funcs.atexit = func;
+ }
+- UNLOCK;
+- return 0;
++
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
++ return rv;
+ }
+ #endif
+
+@@ -133,8 +128,9 @@ int atexit(aefuncp func)
+ int on_exit(oefuncp func, void *arg)
+ {
+ struct exit_function *efp;
++ int rv = -1;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(__atexit_lock);
+ if (func) {
+ #ifdef __UCLIBC_DYNAMIC_ATEXIT__
+ /* If we are out of function table slots, make some more */
+@@ -142,18 +138,16 @@ int on_exit(oefuncp func, void *arg)
+ efp=realloc(__exit_function_table,
+ (__exit_slots+20)*sizeof(struct exit_function));
+ if (efp==NULL) {
+- UNLOCK;
+ __set_errno(ENOMEM);
+- return -1;
++ goto DONE;
+ }
+ __exit_function_table=efp;
+ __exit_slots+=20;
+ }
+ #else
+ if (__exit_count >= __UCLIBC_MAX_ATEXIT) {
+- UNLOCK;
+ __set_errno(ENOMEM);
+- return -1;
++ goto DONE;
+ }
+ #endif
+
+@@ -163,8 +157,12 @@ int on_exit(oefuncp func, void *arg)
+ efp->funcs.on_exit.func = func;
+ efp->funcs.on_exit.arg = arg;
+ }
+- UNLOCK;
+- return 0;
++
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
++ return rv;
+ }
+ #endif
+
+@@ -214,9 +212,8 @@ void __exit_handler(int status)
+ #ifdef L_exit
+ extern void weak_function _stdio_term(void);
+ void (*__exit_cleanup) (int) = 0;
+-#ifdef __UCLIBC_HAS_THREADS__
+-pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-#endif
++
++__UCLIBC_MUTEX_INIT(__atexit_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ #ifdef __UCLIBC_CTOR_DTOR__
+ extern void (*__app_fini)(void);
+@@ -229,11 +226,11 @@ extern void (*__rtld_fini)(void);
+ void exit(int rv)
+ {
+ /* Perform exit-specific cleanup (atexit and on_exit) */
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(__atexit_lock);
+ if (__exit_cleanup) {
+ __exit_cleanup(rv);
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
+
+ #ifdef __UCLIBC_CTOR_DTOR__
+ if (__app_fini != NULL)
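
The atexit()/on_exit() hunks illustrate the conversion pattern used throughout this patch: initialise rv to the failure value, take the lock once, turn every early "UNLOCK; return -1;" into a goto DONE, and unlock at a single point before returning rv. A stand-alone sketch of that shape with plain pthread calls; the real __UCLIBC_MUTEX_* macros are internal and may also manage deferred cancellation.

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int slots_used, slots_max = 32;

static int register_handler(void (*func)(void))
{
    int rv = -1;                        /* pessimistic default */

    pthread_mutex_lock(&table_lock);
    if (func) {
        if (slots_used >= slots_max) {
            errno = ENOMEM;
            goto DONE;                  /* early "return -1" becomes a jump */
        }
        slots_used++;                   /* ...record func in a real table... */
    }
    rv = 0;

DONE:
    pthread_mutex_unlock(&table_lock);  /* the only unlock site */
    return rv;
}

static void noop(void) { }

int main(void)
{
    return register_handler(noop) == 0 ? 0 : 1;
}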
+diff --git a/libc/stdlib/malloc-simple/alloc.c b/libc/stdlib/malloc-simple/alloc.c
+index ed14c37..519a875 100644
+--- a/libc/stdlib/malloc-simple/alloc.c
++++ b/libc/stdlib/malloc-simple/alloc.c
+@@ -108,15 +108,14 @@ void free(void *ptr)
+ #endif
+
+ #ifdef L_memalign
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-# define LOCK __pthread_mutex_lock(&__malloc_lock)
+-# define UNLOCK __pthread_mutex_unlock(&__malloc_lock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_EXTERN(__malloc_lock);
++
++#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
++#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
++
+
+ /* List of blocks allocated with memalign or valloc */
+ struct alignlist
+@@ -135,7 +134,7 @@ int __libc_free_aligned(void *ptr)
+ if (ptr == NULL)
+ return 0;
+
+- LOCK;
++ __MALLOC_LOCK;
+ for (l = _aligned_blocks; l != NULL; l = l->next) {
+ if (l->aligned == ptr) {
+ /* Mark the block as free */
+@@ -146,7 +145,7 @@ int __libc_free_aligned(void *ptr)
+ return 1;
+ }
+ }
+- UNLOCK;
++ __MALLOC_UNLOCK;
+ return 0;
+ }
+ void * memalign (size_t alignment, size_t size)
+@@ -159,10 +158,10 @@ void * memalign (size_t alignment, size_
+ return NULL;
+
+ adj = (unsigned long int) ((unsigned long int) ((char *) result -
+- (char *) NULL)) % alignment;
++ (char *) NULL)) % alignment;
+ if (adj != 0) {
+ struct alignlist *l;
+- LOCK;
++ __MALLOC_LOCK;
+ for (l = _aligned_blocks; l != NULL; l = l->next)
+ if (l->aligned == NULL)
+ /* This slot is free. Use it. */
+@@ -171,15 +170,16 @@ void * memalign (size_t alignment, size_
+ l = (struct alignlist *) malloc (sizeof (struct alignlist));
+ if (l == NULL) {
+ free(result);
+- UNLOCK;
+- return NULL;
++ result = NULL;
++ goto DONE;
+ }
+ l->next = _aligned_blocks;
+ _aligned_blocks = l;
+ }
+ l->exact = result;
+ result = l->aligned = (char *) result + alignment - adj;
+- UNLOCK;
++ DONE:
++ __MALLOC_UNLOCK;
+ }
+
+ return result;
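
The memalign() hunk's pointer math is: adj = (uintptr_t)result % alignment, and when adj is non-zero the returned pointer is bumped to result + alignment - adj, with the exact malloc() pointer remembered (in the global alignlist above) so free() can find it later. The sketch below makes that arithmetic concrete but swaps the bookkeeping: it stashes the original pointer in a header just before the aligned block instead of keeping a list.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *aligned_malloc(size_t alignment, size_t size)
{
    /* Room for the payload, the worst-case shift, and the saved pointer. */
    void *raw = malloc(size + alignment + sizeof(void *));
    if (!raw)
        return NULL;

    uintptr_t base = (uintptr_t)raw + sizeof(void *);
    uintptr_t adj = base % alignment;
    uintptr_t aligned = adj ? base + alignment - adj : base;

    ((void **)aligned)[-1] = raw;       /* remember what to free() later */
    return (void *)aligned;
}

static void aligned_free(void *ptr)
{
    if (ptr)
        free(((void **)ptr)[-1]);
}

int main(void)
{
    void *p = aligned_malloc(64, 100);
    printf("%p is %s64-byte aligned\n", p,
           ((uintptr_t)p % 64) ? "NOT " : "");
    aligned_free(p);
    return 0;
}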
+diff --git a/libc/stdlib/malloc-standard/calloc.c b/libc/stdlib/malloc-standard/calloc.c
+index a67dad7..4277954 100644
+--- a/libc/stdlib/malloc-standard/calloc.c
++++ b/libc/stdlib/malloc-standard/calloc.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -31,63 +31,63 @@ void* calloc(size_t n_elements, size_t e
+ * to fall through and call malloc(0) */
+ size = n_elements * elem_size;
+ if (n_elements && elem_size != (size / n_elements)) {
+- __set_errno(ENOMEM);
+- return NULL;
++ __set_errno(ENOMEM);
++ return NULL;
+ }
+
+- LOCK;
++ __MALLOC_LOCK;
+ mem = malloc(size);
+ if (mem != 0) {
+- p = mem2chunk(mem);
++ p = mem2chunk(mem);
+
+- if (!chunk_is_mmapped(p))
+- {
+- /*
+- Unroll clear of <= 36 bytes (72 if 8byte sizes)
+- We know that contents have an odd number of
+- size_t-sized words; minimally 3.
+- */
+-
+- d = (size_t*)mem;
+- clearsize = chunksize(p) - (sizeof(size_t));
+- nclears = clearsize / sizeof(size_t);
+- assert(nclears >= 3);
+-
+- if (nclears > 9)
+- memset(d, 0, clearsize);
+-
+- else {
+- *(d+0) = 0;
+- *(d+1) = 0;
+- *(d+2) = 0;
+- if (nclears > 4) {
+- *(d+3) = 0;
+- *(d+4) = 0;
+- if (nclears > 6) {
+- *(d+5) = 0;
+- *(d+6) = 0;
+- if (nclears > 8) {
+- *(d+7) = 0;
+- *(d+8) = 0;
++ if (!chunk_is_mmapped(p))
++ {
++ /*
++ Unroll clear of <= 36 bytes (72 if 8byte sizes)
++ We know that contents have an odd number of
++ size_t-sized words; minimally 3.
++ */
++
++ d = (size_t*)mem;
++ clearsize = chunksize(p) - (sizeof(size_t));
++ nclears = clearsize / sizeof(size_t);
++ assert(nclears >= 3);
++
++ if (nclears > 9)
++ memset(d, 0, clearsize);
++
++ else {
++ *(d+0) = 0;
++ *(d+1) = 0;
++ *(d+2) = 0;
++ if (nclears > 4) {
++ *(d+3) = 0;
++ *(d+4) = 0;
++ if (nclears > 6) {
++ *(d+5) = 0;
++ *(d+6) = 0;
++ if (nclears > 8) {
++ *(d+7) = 0;
++ *(d+8) = 0;
++ }
++ }
++ }
++ }
+ }
+- }
+- }
+- }
+- }
+ #if 0
+- else
+- {
+- /* Standard unix mmap using /dev/zero clears memory so calloc
+- * doesn't need to actually zero anything....
+- */
+- d = (size_t*)mem;
+- /* Note the additional (sizeof(size_t)) */
+- clearsize = chunksize(p) - 2*(sizeof(size_t));
+- memset(d, 0, clearsize);
+- }
++ else
++ {
++ /* Standard unix mmap using /dev/zero clears memory so calloc
++ * doesn't need to actually zero anything....
++ */
++ d = (size_t*)mem;
++ /* Note the additional (sizeof(size_t)) */
++ clearsize = chunksize(p) - 2*(sizeof(size_t));
++ memset(d, 0, clearsize);
++ }
+ #endif
+ }
+- UNLOCK;
++ __MALLOC_UNLOCK;
+ return mem;
+ }
+
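
The calloc() hunk keeps the overflow guard at the top: the product n_elements * elem_size is accepted only if dividing it back by n_elements recovers elem_size; otherwise the request is rejected with ENOMEM instead of returning a too-small block. Pulled out on its own, the test looks like this:

#include <stddef.h>
#include <stdio.h>

/* Non-zero if n * elem wraps around SIZE_MAX. */
static int mul_overflows(size_t n, size_t elem)
{
    size_t total = n * elem;            /* may wrap */
    return n != 0 && elem != total / n; /* wrapped iff division disagrees */
}

int main(void)
{
    printf("4 * 8 overflows: %d\n", mul_overflows(4, 8));          /* 0 */
    printf("SIZE_MAX * 16 overflows: %d\n",
           mul_overflows((size_t)-1, 16));                         /* 1 */
    /* calloc() above sets errno = ENOMEM and returns NULL in that case. */
    return 0;
}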
+diff --git a/libc/stdlib/malloc-standard/free.c b/libc/stdlib/malloc-standard/free.c
+index 94e1d65..4e08ef7 100644
+--- a/libc/stdlib/malloc-standard/free.c
++++ b/libc/stdlib/malloc-standard/free.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -42,71 +42,71 @@ static int __malloc_trim(size_t pad, mst
+
+ if (extra > 0) {
+
+- /*
+- Only proceed if end of memory is where we last set it.
+- This avoids problems if there were foreign sbrk calls.
+- */
+- current_brk = (char*)(MORECORE(0));
+- if (current_brk == (char*)(av->top) + top_size) {
+-
+- /*
+- Attempt to release memory. We ignore MORECORE return value,
+- and instead call again to find out where new end of memory is.
+- This avoids problems if first call releases less than we asked,
+- of if failure somehow altered brk value. (We could still
+- encounter problems if it altered brk in some very bad way,
+- but the only thing we can do is adjust anyway, which will cause
+- some downstream failure.)
+- */
+-
+- MORECORE(-extra);
+- new_brk = (char*)(MORECORE(0));
+-
+- if (new_brk != (char*)MORECORE_FAILURE) {
+- released = (long)(current_brk - new_brk);
+-
+- if (released != 0) {
+- /* Success. Adjust top. */
+- av->sbrked_mem -= released;
+- set_head(av->top, (top_size - released) | PREV_INUSE);
+- check_malloc_state();
+- return 1;
++ /*
++ Only proceed if end of memory is where we last set it.
++ This avoids problems if there were foreign sbrk calls.
++ */
++ current_brk = (char*)(MORECORE(0));
++ if (current_brk == (char*)(av->top) + top_size) {
++
++ /*
++ Attempt to release memory. We ignore MORECORE return value,
++ and instead call again to find out where new end of memory is.
++ This avoids problems if first call releases less than we asked,
++          or if failure somehow altered brk value. (We could still

++ encounter problems if it altered brk in some very bad way,
++ but the only thing we can do is adjust anyway, which will cause
++ some downstream failure.)
++ */
++
++ MORECORE(-extra);
++ new_brk = (char*)(MORECORE(0));
++
++ if (new_brk != (char*)MORECORE_FAILURE) {
++ released = (long)(current_brk - new_brk);
++
++ if (released != 0) {
++ /* Success. Adjust top. */
++ av->sbrked_mem -= released;
++ set_head(av->top, (top_size - released) | PREV_INUSE);
++ check_malloc_state();
++ return 1;
++ }
++ }
+ }
+- }
+- }
+ }
+ return 0;
+ }
+
+ /* ------------------------- malloc_trim -------------------------
+- malloc_trim(size_t pad);
++ malloc_trim(size_t pad);
+
+- If possible, gives memory back to the system (via negative
+- arguments to sbrk) if there is unused memory at the `high' end of
+- the malloc pool. You can call this after freeing large blocks of
+- memory to potentially reduce the system-level memory requirements
+- of a program. However, it cannot guarantee to reduce memory. Under
+- some allocation patterns, some large free blocks of memory will be
+- locked between two used chunks, so they cannot be given back to
+- the system.
+-
+- The `pad' argument to malloc_trim represents the amount of free
+- trailing space to leave untrimmed. If this argument is zero,
+- only the minimum amount of memory to maintain internal data
+- structures will be left (one page or less). Non-zero arguments
+- can be supplied to maintain enough trailing space to service
+- future expected allocations without having to re-obtain memory
+- from the system.
+-
+- Malloc_trim returns 1 if it actually released any memory, else 0.
+- On systems that do not support "negative sbrks", it will always
+- return 0.
++ If possible, gives memory back to the system (via negative
++ arguments to sbrk) if there is unused memory at the `high' end of
++ the malloc pool. You can call this after freeing large blocks of
++ memory to potentially reduce the system-level memory requirements
++ of a program. However, it cannot guarantee to reduce memory. Under
++ some allocation patterns, some large free blocks of memory will be
++ locked between two used chunks, so they cannot be given back to
++ the system.
++
++ The `pad' argument to malloc_trim represents the amount of free
++ trailing space to leave untrimmed. If this argument is zero,
++ only the minimum amount of memory to maintain internal data
++ structures will be left (one page or less). Non-zero arguments
++ can be supplied to maintain enough trailing space to service
++ future expected allocations without having to re-obtain memory
++ from the system.
++
++ Malloc_trim returns 1 if it actually released any memory, else 0.
++ On systems that do not support "negative sbrks", it will always
++ return 0.
+ */
+ int malloc_trim(size_t pad)
+ {
+- mstate av = get_malloc_state();
+- __malloc_consolidate(av);
+- return __malloc_trim(pad, av);
++ mstate av = get_malloc_state();
++ __malloc_consolidate(av);
++ return __malloc_trim(pad, av);
+ }
+
+ /*
+@@ -125,8 +125,8 @@ static void malloc_init_state(mstate av)
+
+ /* Establish circular links for normal bins */
+ for (i = 1; i < NBINS; ++i) {
+- bin = bin_at(av,i);
+- bin->fd = bin->bk = bin;
++ bin = bin_at(av,i);
++ bin->fd = bin->bk = bin;
+ }
+
+ av->top_pad = DEFAULT_TOP_PAD;
+@@ -157,15 +157,15 @@ static void malloc_init_state(mstate av)
+
+ /* ------------------------- __malloc_consolidate -------------------------
+
+- __malloc_consolidate is a specialized version of free() that tears
+- down chunks held in fastbins. Free itself cannot be used for this
+- purpose since, among other things, it might place chunks back onto
+- fastbins. So, instead, we need to use a minor variant of the same
+- code.
+-
+- Also, because this routine needs to be called the first time through
+- malloc anyway, it turns out to be the perfect place to trigger
+- initialization code.
++__malloc_consolidate is a specialized version of free() that tears
++down chunks held in fastbins. Free itself cannot be used for this
++purpose since, among other things, it might place chunks back onto
++fastbins. So, instead, we need to use a minor variant of the same
++code.
++
++Also, because this routine needs to be called the first time through
++malloc anyway, it turns out to be the perfect place to trigger
++initialization code.
+ */
+ void __malloc_consolidate(mstate av)
+ {
+@@ -186,78 +186,78 @@ void __malloc_consolidate(mstate av)
+ mchunkptr fwd;
+
+ /*
+- If max_fast is 0, we know that av hasn't
+- yet been initialized, in which case do so below
+- */
++ If max_fast is 0, we know that av hasn't
++ yet been initialized, in which case do so below
++ */
+
+ if (av->max_fast != 0) {
+- clear_fastchunks(av);
++ clear_fastchunks(av);
+
+- unsorted_bin = unsorted_chunks(av);
++ unsorted_bin = unsorted_chunks(av);
+
+- /*
+- Remove each chunk from fast bin and consolidate it, placing it
+- then in unsorted bin. Among other reasons for doing this,
+- placing in unsorted bin avoids needing to calculate actual bins
+- until malloc is sure that chunks aren't immediately going to be
+- reused anyway.
+- */
+-
+- maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
+- fb = &(av->fastbins[0]);
+- do {
+- if ( (p = *fb) != 0) {
+- *fb = 0;
++ /*
++ Remove each chunk from fast bin and consolidate it, placing it
++ then in unsorted bin. Among other reasons for doing this,
++ placing in unsorted bin avoids needing to calculate actual bins
++ until malloc is sure that chunks aren't immediately going to be
++ reused anyway.
++ */
+
++ maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
++ fb = &(av->fastbins[0]);
+ do {
+- check_inuse_chunk(p);
+- nextp = p->fd;
++ if ( (p = *fb) != 0) {
++ *fb = 0;
+
+- /* Slightly streamlined version of consolidation code in free() */
+- size = p->size & ~PREV_INUSE;
+- nextchunk = chunk_at_offset(p, size);
+- nextsize = chunksize(nextchunk);
++ do {
++ check_inuse_chunk(p);
++ nextp = p->fd;
++
++ /* Slightly streamlined version of consolidation code in free() */
++ size = p->size & ~PREV_INUSE;
++ nextchunk = chunk_at_offset(p, size);
++ nextsize = chunksize(nextchunk);
++
++ if (!prev_inuse(p)) {
++ prevsize = p->prev_size;
++ size += prevsize;
++ p = chunk_at_offset(p, -((long) prevsize));
++ unlink(p, bck, fwd);
++ }
++
++ if (nextchunk != av->top) {
++ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
++ set_head(nextchunk, nextsize);
++
++ if (!nextinuse) {
++ size += nextsize;
++ unlink(nextchunk, bck, fwd);
++ }
++
++ first_unsorted = unsorted_bin->fd;
++ unsorted_bin->fd = p;
++ first_unsorted->bk = p;
++
++ set_head(p, size | PREV_INUSE);
++ p->bk = unsorted_bin;
++ p->fd = first_unsorted;
++ set_foot(p, size);
++ }
++
++ else {
++ size += nextsize;
++ set_head(p, size | PREV_INUSE);
++ av->top = p;
++ }
+
+- if (!prev_inuse(p)) {
+- prevsize = p->prev_size;
+- size += prevsize;
+- p = chunk_at_offset(p, -((long) prevsize));
+- unlink(p, bck, fwd);
+- }
++ } while ( (p = nextp) != 0);
+
+- if (nextchunk != av->top) {
+- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+- set_head(nextchunk, nextsize);
+-
+- if (!nextinuse) {
+- size += nextsize;
+- unlink(nextchunk, bck, fwd);
+ }
+-
+- first_unsorted = unsorted_bin->fd;
+- unsorted_bin->fd = p;
+- first_unsorted->bk = p;
+-
+- set_head(p, size | PREV_INUSE);
+- p->bk = unsorted_bin;
+- p->fd = first_unsorted;
+- set_foot(p, size);
+- }
+-
+- else {
+- size += nextsize;
+- set_head(p, size | PREV_INUSE);
+- av->top = p;
+- }
+-
+- } while ( (p = nextp) != 0);
+-
+- }
+- } while (fb++ != maxfb);
++ } while (fb++ != maxfb);
+ }
+ else {
+- malloc_init_state(av);
+- check_malloc_state();
++ malloc_init_state(av);
++ check_malloc_state();
+ }
+ }
+
+@@ -279,9 +279,9 @@ void free(void* mem)
+
+ /* free(0) has no effect */
+ if (mem == NULL)
+- return;
++ return;
+
+- LOCK;
++ __MALLOC_LOCK;
+ av = get_malloc_state();
+ p = mem2chunk(mem);
+ size = chunksize(p);
+@@ -289,9 +289,9 @@ void free(void* mem)
+ check_inuse_chunk(p);
+
+ /*
+- If eligible, place chunk on a fastbin so it can be found
+- and used quickly in malloc.
+- */
++ If eligible, place chunk on a fastbin so it can be found
++ and used quickly in malloc.
++ */
+
+ if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
+
+@@ -300,114 +300,114 @@ void free(void* mem)
+ bordering top into fastbins */
+ && (chunk_at_offset(p, size) != av->top)
+ #endif
+- ) {
++ ) {
+
+- set_fastchunks(av);
+- fb = &(av->fastbins[fastbin_index(size)]);
+- p->fd = *fb;
+- *fb = p;
++ set_fastchunks(av);
++ fb = &(av->fastbins[fastbin_index(size)]);
++ p->fd = *fb;
++ *fb = p;
+ }
+
+ /*
+- Consolidate other non-mmapped chunks as they arrive.
+- */
++ Consolidate other non-mmapped chunks as they arrive.
++ */
+
+ else if (!chunk_is_mmapped(p)) {
+- set_anychunks(av);
++ set_anychunks(av);
++
++ nextchunk = chunk_at_offset(p, size);
++ nextsize = chunksize(nextchunk);
++
++ /* consolidate backward */
++ if (!prev_inuse(p)) {
++ prevsize = p->prev_size;
++ size += prevsize;
++ p = chunk_at_offset(p, -((long) prevsize));
++ unlink(p, bck, fwd);
++ }
++
++ if (nextchunk != av->top) {
++ /* get and clear inuse bit */
++ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
++ set_head(nextchunk, nextsize);
++
++ /* consolidate forward */
++ if (!nextinuse) {
++ unlink(nextchunk, bck, fwd);
++ size += nextsize;
++ }
++
++ /*
++ Place the chunk in unsorted chunk list. Chunks are
++ not placed into regular bins until after they have
++ been given one chance to be used in malloc.
++ */
++
++ bck = unsorted_chunks(av);
++ fwd = bck->fd;
++ p->bk = bck;
++ p->fd = fwd;
++ bck->fd = p;
++ fwd->bk = p;
+
+- nextchunk = chunk_at_offset(p, size);
+- nextsize = chunksize(nextchunk);
++ set_head(p, size | PREV_INUSE);
++ set_foot(p, size);
++
++ check_free_chunk(p);
++ }
++
++ /*
++ If the chunk borders the current high end of memory,
++ consolidate into top
++ */
+
+- /* consolidate backward */
+- if (!prev_inuse(p)) {
+- prevsize = p->prev_size;
+- size += prevsize;
+- p = chunk_at_offset(p, -((long) prevsize));
+- unlink(p, bck, fwd);
+- }
+-
+- if (nextchunk != av->top) {
+- /* get and clear inuse bit */
+- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+- set_head(nextchunk, nextsize);
+-
+- /* consolidate forward */
+- if (!nextinuse) {
+- unlink(nextchunk, bck, fwd);
+- size += nextsize;
+- }
+-
+- /*
+- Place the chunk in unsorted chunk list. Chunks are
+- not placed into regular bins until after they have
+- been given one chance to be used in malloc.
+- */
+-
+- bck = unsorted_chunks(av);
+- fwd = bck->fd;
+- p->bk = bck;
+- p->fd = fwd;
+- bck->fd = p;
+- fwd->bk = p;
+-
+- set_head(p, size | PREV_INUSE);
+- set_foot(p, size);
+-
+- check_free_chunk(p);
+- }
+-
+- /*
+- If the chunk borders the current high end of memory,
+- consolidate into top
+- */
+-
+- else {
+- size += nextsize;
+- set_head(p, size | PREV_INUSE);
+- av->top = p;
+- check_chunk(p);
+- }
+-
+- /*
+- If freeing a large space, consolidate possibly-surrounding
+- chunks. Then, if the total unused topmost memory exceeds trim
+- threshold, ask malloc_trim to reduce top.
+-
+- Unless max_fast is 0, we don't know if there are fastbins
+- bordering top, so we cannot tell for sure whether threshold
+- has been reached unless fastbins are consolidated. But we
+- don't want to consolidate on each free. As a compromise,
+- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+- is reached.
+- */
+-
+- if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+- if (have_fastchunks(av))
+- __malloc_consolidate(av);
+-
+- if ((unsigned long)(chunksize(av->top)) >=
+- (unsigned long)(av->trim_threshold))
+- __malloc_trim(av->top_pad, av);
+- }
++ else {
++ size += nextsize;
++ set_head(p, size | PREV_INUSE);
++ av->top = p;
++ check_chunk(p);
++ }
++
++ /*
++ If freeing a large space, consolidate possibly-surrounding
++ chunks. Then, if the total unused topmost memory exceeds trim
++ threshold, ask malloc_trim to reduce top.
++
++ Unless max_fast is 0, we don't know if there are fastbins
++ bordering top, so we cannot tell for sure whether threshold
++ has been reached unless fastbins are consolidated. But we
++ don't want to consolidate on each free. As a compromise,
++ consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
++ is reached.
++ */
++
++ if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
++ if (have_fastchunks(av))
++ __malloc_consolidate(av);
++
++ if ((unsigned long)(chunksize(av->top)) >=
++ (unsigned long)(av->trim_threshold))
++ __malloc_trim(av->top_pad, av);
++ }
+
+ }
+ /*
+- If the chunk was allocated via mmap, release via munmap()
+- Note that if HAVE_MMAP is false but chunk_is_mmapped is
+- true, then user must have overwritten memory. There's nothing
+- we can do to catch this error unless DEBUG is set, in which case
+- check_inuse_chunk (above) will have triggered error.
+- */
++ If the chunk was allocated via mmap, release via munmap()
++ Note that if HAVE_MMAP is false but chunk_is_mmapped is
++ true, then user must have overwritten memory. There's nothing
++ we can do to catch this error unless DEBUG is set, in which case
++ check_inuse_chunk (above) will have triggered error.
++ */
+
+ else {
+- int ret;
+- size_t offset = p->prev_size;
+- av->n_mmaps--;
+- av->mmapped_mem -= (size + offset);
+- ret = munmap((char*)p - offset, size + offset);
+- /* munmap returns non-zero on failure */
+- assert(ret == 0);
++ int ret;
++ size_t offset = p->prev_size;
++ av->n_mmaps--;
++ av->mmapped_mem -= (size + offset);
++ ret = munmap((char*)p - offset, size + offset);
++ /* munmap returns non-zero on failure */
++ assert(ret == 0);
+ }
+- UNLOCK;
++ __MALLOC_UNLOCK;
+ }
+
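
The long comment re-indented above documents malloc_trim(pad): give unused memory at the top of the heap back to the system via a negative sbrk, keeping pad bytes of slack, and return 1 only if something was actually released. A usage sketch along those lines; whether anything is released depends on what sits at the top of the heap.

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    enum { N = 2048, SZ = 4096 };
    void *blocks[N];

    for (int i = 0; i < N; i++)         /* grow the heap */
        blocks[i] = malloc(SZ);
    for (int i = 0; i < N; i++)         /* ...then release it all */
        free(blocks[i]);

    /* Keep ~128 KiB of slack, return the rest if the top chunk allows. */
    int released = malloc_trim(128 * 1024);
    printf("malloc_trim released memory: %s\n", released ? "yes" : "no");
    return 0;
}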
+diff --git a/libc/stdlib/malloc-standard/mallinfo.c b/libc/stdlib/malloc-standard/mallinfo.c
+index 51ac423..1e0875c 100644
+--- a/libc/stdlib/malloc-standard/mallinfo.c
++++ b/libc/stdlib/malloc-standard/mallinfo.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -30,11 +30,11 @@ struct mallinfo mallinfo(void)
+ int nblocks;
+ int nfastblocks;
+
+- LOCK;
++ __MALLOC_LOCK;
+ av = get_malloc_state();
+ /* Ensure initialization */
+ if (av->top == 0) {
+- __malloc_consolidate(av);
++ __malloc_consolidate(av);
+ }
+
+ check_malloc_state();
+@@ -48,21 +48,21 @@ struct mallinfo mallinfo(void)
+ fastavail = 0;
+
+ for (i = 0; i < NFASTBINS; ++i) {
+- for (p = av->fastbins[i]; p != 0; p = p->fd) {
+- ++nfastblocks;
+- fastavail += chunksize(p);
+- }
++ for (p = av->fastbins[i]; p != 0; p = p->fd) {
++ ++nfastblocks;
++ fastavail += chunksize(p);
++ }
+ }
+
+ avail += fastavail;
+
+ /* traverse regular bins */
+ for (i = 1; i < NBINS; ++i) {
+- b = bin_at(av, i);
+- for (p = last(b); p != b; p = p->bk) {
+- ++nblocks;
+- avail += chunksize(p);
+- }
++ b = bin_at(av, i);
++ for (p = last(b); p != b; p = p->bk) {
++ ++nblocks;
++ avail += chunksize(p);
++ }
+ }
+
+ mi.smblks = nfastblocks;
+@@ -75,7 +75,7 @@ struct mallinfo mallinfo(void)
+ mi.fsmblks = fastavail;
+ mi.keepcost = chunksize(av->top);
+ mi.usmblks = av->max_total_mem;
+- UNLOCK;
++ __MALLOC_UNLOCK;
+ return mi;
+ }
+
+@@ -84,23 +84,40 @@ void malloc_stats(FILE *file)
+ struct mallinfo mi;
+
+ if (file==NULL) {
+- file = stderr;
++ file = stderr;
+ }
+
+ mi = mallinfo();
+- fprintf(file, "total bytes allocated = %10u\n", (unsigned int)(mi.arena + mi.hblkhd));
+- fprintf(file, "total bytes in use bytes = %10u\n", (unsigned int)(mi.uordblks + mi.hblkhd));
+- fprintf(file, "total non-mmapped bytes allocated = %10d\n", mi.arena);
+- fprintf(file, "number of mmapped regions = %10d\n", mi.hblks);
+- fprintf(file, "total allocated mmap space = %10d\n", mi.hblkhd);
+- fprintf(file, "total allocated sbrk space = %10d\n", mi.uordblks);
++ fprintf(file,
++ "total bytes allocated = %10u\n"
++ "total bytes in use bytes = %10u\n"
++ "total non-mmapped bytes allocated = %10d\n"
++ "number of mmapped regions = %10d\n"
++ "total allocated mmap space = %10d\n"
++ "total allocated sbrk space = %10d\n"
+ #if 0
+- fprintf(file, "number of free chunks = %10d\n", mi.ordblks);
+- fprintf(file, "number of fastbin blocks = %10d\n", mi.smblks);
+- fprintf(file, "space in freed fastbin blocks = %10d\n", mi.fsmblks);
++ "number of free chunks = %10d\n"
++ "number of fastbin blocks = %10d\n"
++ "space in freed fastbin blocks = %10d\n"
+ #endif
+- fprintf(file, "maximum total allocated space = %10d\n", mi.usmblks);
+- fprintf(file, "total free space = %10d\n", mi.fordblks);
+- fprintf(file, "memory releasable via malloc_trim = %10d\n", mi.keepcost);
++ "maximum total allocated space = %10d\n"
++ "total free space = %10d\n"
++ "memory releasable via malloc_trim = %10d\n",
++
++ (unsigned int)(mi.arena + mi.hblkhd),
++ (unsigned int)(mi.uordblks + mi.hblkhd),
++ mi.arena,
++ mi.hblks,
++ mi.hblkhd,
++ mi.uordblks,
++#if 0
++ mi.ordblks,
++ mi.smblks,
++ mi.fsmblks,
++#endif
++ mi.usmblks,
++ mi.fordblks,
++ mi.keepcost
++ );
+ }
+
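
The mallinfo()/malloc_stats() hunk consolidates the report into one fprintf and shows which struct mallinfo fields feed it. A small caller-side sketch reading a few of those fields follows; note that in this tree malloc_stats() takes a FILE * (NULL meaning stderr), unlike glibc's zero-argument version.

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *p = malloc(100 * 1024);       /* perturb the heap a little */

    struct mallinfo mi = mallinfo();
    printf("non-mmapped arena bytes : %d\n", mi.arena);
    printf("bytes in use            : %d\n", mi.uordblks);
    printf("releasable via trim     : %d\n", mi.keepcost);

    free(p);
    return 0;
}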
+diff --git a/libc/stdlib/malloc-standard/malloc.c b/libc/stdlib/malloc-standard/malloc.c
+index 7025e83..60494a0 100644
+--- a/libc/stdlib/malloc-standard/malloc.c
++++ b/libc/stdlib/malloc-standard/malloc.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -17,17 +17,14 @@
+ #define _GNU_SOURCE
+ #include "malloc.h"
+
+-
+-#ifdef __UCLIBC_HAS_THREADS__
+-pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-#endif
++__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ /*
+- There is exactly one instance of this struct in this malloc.
+- If you are adapting this malloc in a way that does NOT use a static
+- malloc_state, you MUST explicitly zero-fill it before using. This
+- malloc relies on the property that malloc_state is initialized to
+- all zeroes (as is true of C statics).
++ There is exactly one instance of this struct in this malloc.
++ If you are adapting this malloc in a way that does NOT use a static
++ malloc_state, you MUST explicitly zero-fill it before using. This
++ malloc relies on the property that malloc_state is initialized to
++ all zeroes (as is true of C statics).
+ */
+ struct malloc_state __malloc_state; /* never directly referenced */
+
+@@ -77,30 +74,30 @@ void __do_check_chunk(mchunkptr p)
+
+ if (!chunk_is_mmapped(p)) {
+
+- /* Has legal address ... */
+- if (p != av->top) {
+- if (contiguous(av)) {
+- assert(((char*)p) >= min_address);
+- assert(((char*)p + sz) <= ((char*)(av->top)));
+- }
+- }
+- else {
+- /* top size is always at least MINSIZE */
+- assert((unsigned long)(sz) >= MINSIZE);
+- /* top predecessor always marked inuse */
+- assert(prev_inuse(p));
+- }
++ /* Has legal address ... */
++ if (p != av->top) {
++ if (contiguous(av)) {
++ assert(((char*)p) >= min_address);
++ assert(((char*)p + sz) <= ((char*)(av->top)));
++ }
++ }
++ else {
++ /* top size is always at least MINSIZE */
++ assert((unsigned long)(sz) >= MINSIZE);
++ /* top predecessor always marked inuse */
++ assert(prev_inuse(p));
++ }
+
+ }
+ else {
+- /* address is outside main heap */
+- if (contiguous(av) && av->top != initial_top(av)) {
+- assert(((char*)p) < min_address || ((char*)p) > max_address);
+- }
+- /* chunk is page-aligned */
+- assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
+- /* mem is aligned */
+- assert(aligned_OK(chunk2mem(p)));
++ /* address is outside main heap */
++ if (contiguous(av) && av->top != initial_top(av)) {
++ assert(((char*)p) < min_address || ((char*)p) > max_address);
++ }
++ /* chunk is page-aligned */
++ assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
++ /* mem is aligned */
++ assert(aligned_OK(chunk2mem(p)));
+ }
+ }
+
+@@ -121,21 +118,21 @@ void __do_check_free_chunk(mchunkptr p)
+
+ /* Unless a special marker, must have OK fields */
+ if ((unsigned long)(sz) >= MINSIZE)
+- {
+- assert((sz & MALLOC_ALIGN_MASK) == 0);
+- assert(aligned_OK(chunk2mem(p)));
+- /* ... matching footer field */
+- assert(next->prev_size == sz);
+- /* ... and is fully consolidated */
+- assert(prev_inuse(p));
+- assert (next == av->top || inuse(next));
+-
+- /* ... and has minimally sane links */
+- assert(p->fd->bk == p);
+- assert(p->bk->fd == p);
+- }
++ {
++ assert((sz & MALLOC_ALIGN_MASK) == 0);
++ assert(aligned_OK(chunk2mem(p)));
++ /* ... matching footer field */
++ assert(next->prev_size == sz);
++ /* ... and is fully consolidated */
++ assert(prev_inuse(p));
++ assert (next == av->top || inuse(next));
++
++ /* ... and has minimally sane links */
++ assert(p->fd->bk == p);
++ assert(p->bk->fd == p);
++ }
+ else /* markers are always of size (sizeof(size_t)) */
+- assert(sz == (sizeof(size_t)));
++ assert(sz == (sizeof(size_t)));
+ }
+
+ /* Properties of inuse chunks */
+@@ -146,7 +143,7 @@ void __do_check_inuse_chunk(mchunkptr p)
+ __do_check_chunk(p);
+
+ if (chunk_is_mmapped(p))
+- return; /* mmapped chunks have no next/prev */
++ return; /* mmapped chunks have no next/prev */
+
+ /* Check whether it claims to be in use ... */
+ assert(inuse(p));
+@@ -156,20 +153,20 @@ void __do_check_inuse_chunk(mchunkptr p)
+ /* ... and is surrounded by OK chunks.
+ Since more things can be checked with free chunks than inuse ones,
+ if an inuse chunk borders them and debug is on, it's worth doing them.
+- */
++ */
+ if (!prev_inuse(p)) {
+- /* Note that we cannot even look at prev unless it is not inuse */
+- mchunkptr prv = prev_chunk(p);
+- assert(next_chunk(prv) == p);
+- __do_check_free_chunk(prv);
++ /* Note that we cannot even look at prev unless it is not inuse */
++ mchunkptr prv = prev_chunk(p);
++ assert(next_chunk(prv) == p);
++ __do_check_free_chunk(prv);
+ }
+
+ if (next == av->top) {
+- assert(prev_inuse(next));
+- assert(chunksize(next) >= MINSIZE);
++ assert(prev_inuse(next));
++ assert(chunksize(next) >= MINSIZE);
+ }
+ else if (!inuse(next))
+- __do_check_free_chunk(next);
++ __do_check_free_chunk(next);
+ }
+
+ /* Properties of chunks recycled from fastbins */
+@@ -198,14 +195,14 @@ void __do_check_malloced_chunk(mchunkptr
+ __do_check_remalloced_chunk(p, s);
+
+ /*
+- ... plus, must obey implementation invariant that prev_inuse is
+- always true of any allocated chunk; i.e., that each allocated
+- chunk borders either a previously allocated and still in-use
+- chunk, or the base of its memory arena. This is ensured
+- by making all allocations from the the `lowest' part of any found
+- chunk. This does not necessarily hold however for chunks
+- recycled via fastbins.
+- */
++ ... plus, must obey implementation invariant that prev_inuse is
++ always true of any allocated chunk; i.e., that each allocated
++ chunk borders either a previously allocated and still in-use
++ chunk, or the base of its memory arena. This is ensured
++      by making all allocations from the `lowest' part of any found
++ chunk. This does not necessarily hold however for chunks
++ recycled via fastbins.
++ */
+
+ assert(prev_inuse(p));
+ }
+@@ -243,7 +240,7 @@ void __do_check_malloc_state(void)
+
+ /* cannot run remaining checks until fully initialized */
+ if (av->top == 0 || av->top == initial_top(av))
+- return;
++ return;
+
+ /* pagesize is a power of 2 */
+ assert((av->pagesize & (av->pagesize-1)) == 0);
+@@ -256,64 +253,64 @@ void __do_check_malloc_state(void)
+ max_fast_bin = fastbin_index(av->max_fast);
+
+ for (i = 0; i < NFASTBINS; ++i) {
+- p = av->fastbins[i];
++ p = av->fastbins[i];
+
+- /* all bins past max_fast are empty */
+- if (i > max_fast_bin)
+- assert(p == 0);
+-
+- while (p != 0) {
+- /* each chunk claims to be inuse */
+- __do_check_inuse_chunk(p);
+- total += chunksize(p);
+- /* chunk belongs in this bin */
+- assert(fastbin_index(chunksize(p)) == i);
+- p = p->fd;
+- }
++ /* all bins past max_fast are empty */
++ if (i > max_fast_bin)
++ assert(p == 0);
++
++ while (p != 0) {
++ /* each chunk claims to be inuse */
++ __do_check_inuse_chunk(p);
++ total += chunksize(p);
++ /* chunk belongs in this bin */
++ assert(fastbin_index(chunksize(p)) == i);
++ p = p->fd;
++ }
+ }
+
+ if (total != 0)
+- assert(have_fastchunks(av));
++ assert(have_fastchunks(av));
+ else if (!have_fastchunks(av))
+- assert(total == 0);
++ assert(total == 0);
+
+ /* check normal bins */
+ for (i = 1; i < NBINS; ++i) {
+- b = bin_at(av,i);
++ b = bin_at(av,i);
+
+- /* binmap is accurate (except for bin 1 == unsorted_chunks) */
+- if (i >= 2) {
+- binbit = get_binmap(av,i);
+- empty = last(b) == b;
+- if (!binbit)
+- assert(empty);
+- else if (!empty)
+- assert(binbit);
+- }
+-
+- for (p = last(b); p != b; p = p->bk) {
+- /* each chunk claims to be free */
+- __do_check_free_chunk(p);
+- size = chunksize(p);
+- total += size;
+- if (i >= 2) {
+- /* chunk belongs in bin */
+- idx = bin_index(size);
+- assert(idx == i);
+- /* lists are sorted */
+- if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
+- assert(p->bk == b ||
+- (unsigned long)chunksize(p->bk) >=
+- (unsigned long)chunksize(p));
+- }
+- }
+- /* chunk is followed by a legal chain of inuse chunks */
+- for (q = next_chunk(p);
+- (q != av->top && inuse(q) &&
+- (unsigned long)(chunksize(q)) >= MINSIZE);
+- q = next_chunk(q))
+- __do_check_inuse_chunk(q);
+- }
++ /* binmap is accurate (except for bin 1 == unsorted_chunks) */
++ if (i >= 2) {
++ binbit = get_binmap(av,i);
++ empty = last(b) == b;
++ if (!binbit)
++ assert(empty);
++ else if (!empty)
++ assert(binbit);
++ }
++
++ for (p = last(b); p != b; p = p->bk) {
++ /* each chunk claims to be free */
++ __do_check_free_chunk(p);
++ size = chunksize(p);
++ total += size;
++ if (i >= 2) {
++ /* chunk belongs in bin */
++ idx = bin_index(size);
++ assert(idx == i);
++ /* lists are sorted */
++ if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
++ assert(p->bk == b ||
++ (unsigned long)chunksize(p->bk) >=
++ (unsigned long)chunksize(p));
++ }
++ }
++ /* chunk is followed by a legal chain of inuse chunks */
++ for (q = next_chunk(p);
++ (q != av->top && inuse(q) &&
++ (unsigned long)(chunksize(q)) >= MINSIZE);
++ q = next_chunk(q))
++ __do_check_inuse_chunk(q);
++ }
+ }
+
+ /* top chunk is OK */
+@@ -326,13 +323,13 @@ void __do_check_malloc_state(void)
+ assert(av->n_mmaps <= av->max_n_mmaps);
+
+ assert((unsigned long)(av->sbrked_mem) <=
+- (unsigned long)(av->max_sbrked_mem));
++ (unsigned long)(av->max_sbrked_mem));
+
+ assert((unsigned long)(av->mmapped_mem) <=
+- (unsigned long)(av->max_mmapped_mem));
++ (unsigned long)(av->max_mmapped_mem));
+
+ assert((unsigned long)(av->max_total_mem) >=
+- (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
++ (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
+ }
+ #endif
+
+@@ -370,84 +367,84 @@ static void* __malloc_alloc(size_t nb, m
+ size_t pagemask = av->pagesize - 1;
+
+ /*
+- If there is space available in fastbins, consolidate and retry
+- malloc from scratch rather than getting memory from system. This
+- can occur only if nb is in smallbin range so we didn't consolidate
+- upon entry to malloc. It is much easier to handle this case here
+- than in malloc proper.
+- */
++ If there is space available in fastbins, consolidate and retry
++ malloc from scratch rather than getting memory from system. This
++ can occur only if nb is in smallbin range so we didn't consolidate
++ upon entry to malloc. It is much easier to handle this case here
++ than in malloc proper.
++ */
+
+ if (have_fastchunks(av)) {
+- assert(in_smallbin_range(nb));
+- __malloc_consolidate(av);
+- return malloc(nb - MALLOC_ALIGN_MASK);
++ assert(in_smallbin_range(nb));
++ __malloc_consolidate(av);
++ return malloc(nb - MALLOC_ALIGN_MASK);
+ }
+
+
+ /*
+- If have mmap, and the request size meets the mmap threshold, and
+- the system supports mmap, and there are few enough currently
+- allocated mmapped regions, try to directly map this request
+- rather than expanding top.
+- */
++ If have mmap, and the request size meets the mmap threshold, and
++ the system supports mmap, and there are few enough currently
++ allocated mmapped regions, try to directly map this request
++ rather than expanding top.
++ */
+
+ if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
+ (av->n_mmaps < av->n_mmaps_max)) {
+
+- char* mm; /* return value from mmap call*/
+-
+- /*
+- Round up size to nearest page. For mmapped chunks, the overhead
+- is one (sizeof(size_t)) unit larger than for normal chunks, because there
+- is no following chunk whose prev_size field could be used.
+- */
+- size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
+-
+- /* Don't try if size wraps around 0 */
+- if ((unsigned long)(size) > (unsigned long)(nb)) {
+-
+- mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
+-
+- if (mm != (char*)(MORECORE_FAILURE)) {
++ char* mm; /* return value from mmap call*/
+
+ /*
+- The offset to the start of the mmapped region is stored
+- in the prev_size field of the chunk. This allows us to adjust
+- returned start address to meet alignment requirements here
+- and in memalign(), and still be able to compute proper
+- address argument for later munmap in free() and realloc().
+- */
+-
+- front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+- if (front_misalign > 0) {
+- correction = MALLOC_ALIGNMENT - front_misalign;
+- p = (mchunkptr)(mm + correction);
+- p->prev_size = correction;
+- set_head(p, (size - correction) |IS_MMAPPED);
+- }
+- else {
+- p = (mchunkptr)mm;
+- p->prev_size = 0;
+- set_head(p, size|IS_MMAPPED);
+- }
++ Round up size to nearest page. For mmapped chunks, the overhead
++ is one (sizeof(size_t)) unit larger than for normal chunks, because there
++ is no following chunk whose prev_size field could be used.
++ */
++ size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
++
++ /* Don't try if size wraps around 0 */
++ if ((unsigned long)(size) > (unsigned long)(nb)) {
++
++ mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
++
++ if (mm != (char*)(MORECORE_FAILURE)) {
++
++ /*
++ The offset to the start of the mmapped region is stored
++ in the prev_size field of the chunk. This allows us to adjust
++ returned start address to meet alignment requirements here
++ and in memalign(), and still be able to compute proper
++ address argument for later munmap in free() and realloc().
++ */
++
++ front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
++ if (front_misalign > 0) {
++ correction = MALLOC_ALIGNMENT - front_misalign;
++ p = (mchunkptr)(mm + correction);
++ p->prev_size = correction;
++ set_head(p, (size - correction) |IS_MMAPPED);
++ }
++ else {
++ p = (mchunkptr)mm;
++ p->prev_size = 0;
++ set_head(p, size|IS_MMAPPED);
++ }
++
++ /* update statistics */
++
++ if (++av->n_mmaps > av->max_n_mmaps)
++ av->max_n_mmaps = av->n_mmaps;
++
++ sum = av->mmapped_mem += size;
++ if (sum > (unsigned long)(av->max_mmapped_mem))
++ av->max_mmapped_mem = sum;
++ sum += av->sbrked_mem;
++ if (sum > (unsigned long)(av->max_total_mem))
++ av->max_total_mem = sum;
+
+- /* update statistics */
++ check_chunk(p);
+
+- if (++av->n_mmaps > av->max_n_mmaps)
+- av->max_n_mmaps = av->n_mmaps;
+-
+- sum = av->mmapped_mem += size;
+- if (sum > (unsigned long)(av->max_mmapped_mem))
+- av->max_mmapped_mem = sum;
+- sum += av->sbrked_mem;
+- if (sum > (unsigned long)(av->max_total_mem))
+- av->max_total_mem = sum;
+-
+- check_chunk(p);
+-
+- return chunk2mem(p);
+- }
+- }
++ return chunk2mem(p);
++ }
++ }
+ }
+
+ /* Record incoming configuration of top */
+@@ -462,8 +459,8 @@ static void* __malloc_alloc(size_t nb, m
+ * be at least MINSIZE and to have prev_inuse set. */
+
+ assert((old_top == initial_top(av) && old_size == 0) ||
+- ((unsigned long) (old_size) >= MINSIZE &&
+- prev_inuse(old_top)));
++ ((unsigned long) (old_size) >= MINSIZE &&
++ prev_inuse(old_top)));
+
+ /* Precondition: not enough current space to satisfy nb request */
+ assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
+@@ -477,272 +474,272 @@ static void* __malloc_alloc(size_t nb, m
+ size = nb + av->top_pad + MINSIZE;
+
+ /*
+- If contiguous, we can subtract out existing space that we hope to
+- combine with new space. We add it back later only if
+- we don't actually get contiguous space.
+- */
++ If contiguous, we can subtract out existing space that we hope to
++ combine with new space. We add it back later only if
++ we don't actually get contiguous space.
++ */
+
+ if (contiguous(av))
+- size -= old_size;
++ size -= old_size;
+
+ /*
+- Round to a multiple of page size.
+- If MORECORE is not contiguous, this ensures that we only call it
+- with whole-page arguments. And if MORECORE is contiguous and
+- this is not first time through, this preserves page-alignment of
+- previous calls. Otherwise, we correct to page-align below.
+- */
++ Round to a multiple of page size.
++ If MORECORE is not contiguous, this ensures that we only call it
++ with whole-page arguments. And if MORECORE is contiguous and
++ this is not first time through, this preserves page-alignment of
++ previous calls. Otherwise, we correct to page-align below.
++ */
+
+ size = (size + pagemask) & ~pagemask;
+
+ /*
+- Don't try to call MORECORE if argument is so big as to appear
+- negative. Note that since mmap takes size_t arg, it may succeed
+- below even if we cannot call MORECORE.
+- */
++ Don't try to call MORECORE if argument is so big as to appear
++ negative. Note that since mmap takes size_t arg, it may succeed
++ below even if we cannot call MORECORE.
++ */
+
+ if (size > 0)
+- brk = (char*)(MORECORE(size));
++ brk = (char*)(MORECORE(size));
+
+ /*
+- If have mmap, try using it as a backup when MORECORE fails or
+- cannot be used. This is worth doing on systems that have "holes" in
+- address space, so sbrk cannot extend to give contiguous space, but
+- space is available elsewhere. Note that we ignore mmap max count
+- and threshold limits, since the space will not be used as a
+- segregated mmap region.
+- */
++ If have mmap, try using it as a backup when MORECORE fails or
++ cannot be used. This is worth doing on systems that have "holes" in
++ address space, so sbrk cannot extend to give contiguous space, but
++ space is available elsewhere. Note that we ignore mmap max count
++ and threshold limits, since the space will not be used as a
++ segregated mmap region.
++ */
+
+ if (brk == (char*)(MORECORE_FAILURE)) {
+
+- /* Cannot merge with old top, so add its size back in */
+- if (contiguous(av))
+- size = (size + old_size + pagemask) & ~pagemask;
+-
+- /* If we are relying on mmap as backup, then use larger units */
+- if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
+- size = MMAP_AS_MORECORE_SIZE;
+-
+- /* Don't try if size wraps around 0 */
+- if ((unsigned long)(size) > (unsigned long)(nb)) {
+-
+- brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
+-
+- if (brk != (char*)(MORECORE_FAILURE)) {
+-
+- /* We do not need, and cannot use, another sbrk call to find end */
+- snd_brk = brk + size;
+-
+- /* Record that we no longer have a contiguous sbrk region.
+- After the first time mmap is used as backup, we do not
+- ever rely on contiguous space since this could incorrectly
+- bridge regions.
+- */
+- set_noncontiguous(av);
+- }
+- }
++ /* Cannot merge with old top, so add its size back in */
++ if (contiguous(av))
++ size = (size + old_size + pagemask) & ~pagemask;
++
++ /* If we are relying on mmap as backup, then use larger units */
++ if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
++ size = MMAP_AS_MORECORE_SIZE;
++
++ /* Don't try if size wraps around 0 */
++ if ((unsigned long)(size) > (unsigned long)(nb)) {
++
++ brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
++
++ if (brk != (char*)(MORECORE_FAILURE)) {
++
++ /* We do not need, and cannot use, another sbrk call to find end */
++ snd_brk = brk + size;
++
++ /* Record that we no longer have a contiguous sbrk region.
++ After the first time mmap is used as backup, we do not
++ ever rely on contiguous space since this could incorrectly
++ bridge regions.
++ */
++ set_noncontiguous(av);
++ }
++ }
+ }
+
+ if (brk != (char*)(MORECORE_FAILURE)) {
+- av->sbrked_mem += size;
++ av->sbrked_mem += size;
+
+- /*
+- If MORECORE extends previous space, we can likewise extend top size.
+- */
+-
+- if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
+- set_head(old_top, (size + old_size) | PREV_INUSE);
+- }
+-
+- /*
+- Otherwise, make adjustments:
+-
+- * If the first time through or noncontiguous, we need to call sbrk
+- just to find out where the end of memory lies.
+-
+- * We need to ensure that all returned chunks from malloc will meet
+- MALLOC_ALIGNMENT
+-
+- * If there was an intervening foreign sbrk, we need to adjust sbrk
+- request size to account for fact that we will not be able to
+- combine new space with existing space in old_top.
+-
+- * Almost all systems internally allocate whole pages at a time, in
+- which case we might as well use the whole last page of request.
+- So we allocate enough more memory to hit a page boundary now,
+- which in turn causes future contiguous calls to page-align.
+- */
+-
+- else {
+- front_misalign = 0;
+- end_misalign = 0;
+- correction = 0;
+- aligned_brk = brk;
+-
+- /*
+- If MORECORE returns an address lower than we have seen before,
+- we know it isn't really contiguous. This and some subsequent
+- checks help cope with non-conforming MORECORE functions and
+- the presence of "foreign" calls to MORECORE from outside of
+- malloc or by other threads. We cannot guarantee to detect
+- these in all cases, but cope with the ones we do detect.
+- */
+- if (contiguous(av) && old_size != 0 && brk < old_end) {
+- set_noncontiguous(av);
+- }
+-
+- /* handle contiguous cases */
+- if (contiguous(av)) {
+-
+- /* We can tolerate forward non-contiguities here (usually due
+- to foreign calls) but treat them as part of our space for
+- stats reporting. */
+- if (old_size != 0)
+- av->sbrked_mem += brk - old_end;
+-
+- /* Guarantee alignment of first new chunk made from this space */
+-
+- front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+- if (front_misalign > 0) {
+-
+- /*
+- Skip over some bytes to arrive at an aligned position.
+- We don't need to specially mark these wasted front bytes.
+- They will never be accessed anyway because
+- prev_inuse of av->top (and any chunk created from its start)
+- is always true after initialization.
+- */
++ /*
++ If MORECORE extends previous space, we can likewise extend top size.
++ */
+
+- correction = MALLOC_ALIGNMENT - front_misalign;
+- aligned_brk += correction;
++ if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
++ set_head(old_top, (size + old_size) | PREV_INUSE);
+ }
+
+ /*
+- If this isn't adjacent to existing space, then we will not
+- be able to merge with old_top space, so must add to 2nd request.
+- */
+-
+- correction += old_size;
+-
+- /* Extend the end address to hit a page boundary */
+- end_misalign = (size_t)(brk + size + correction);
+- correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
+-
+- assert(correction >= 0);
+- snd_brk = (char*)(MORECORE(correction));
+-
+- if (snd_brk == (char*)(MORECORE_FAILURE)) {
+- /*
+- If can't allocate correction, try to at least find out current
+- brk. It might be enough to proceed without failing.
+- */
+- correction = 0;
+- snd_brk = (char*)(MORECORE(0));
+- }
+- else if (snd_brk < brk) {
+- /*
+- If the second call gives noncontiguous space even though
+- it says it won't, the only course of action is to ignore
+- results of second call, and conservatively estimate where
+- the first call left us. Also set noncontiguous, so this
+- won't happen again, leaving at most one hole.
+-
+- Note that this check is intrinsically incomplete. Because
+- MORECORE is allowed to give more space than we ask for,
+- there is no reliable way to detect a noncontiguity
+- producing a forward gap for the second call.
+- */
+- snd_brk = brk + size;
+- correction = 0;
+- set_noncontiguous(av);
+- }
+-
+- }
+-
+- /* handle non-contiguous cases */
+- else {
+- /* MORECORE/mmap must correctly align */
+- assert(aligned_OK(chunk2mem(brk)));
+-
+- /* Find out current end of memory */
+- if (snd_brk == (char*)(MORECORE_FAILURE)) {
+- snd_brk = (char*)(MORECORE(0));
+- av->sbrked_mem += snd_brk - brk - size;
+- }
+- }
+-
+- /* Adjust top based on results of second sbrk */
+- if (snd_brk != (char*)(MORECORE_FAILURE)) {
+- av->top = (mchunkptr)aligned_brk;
+- set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
+- av->sbrked_mem += correction;
++ Otherwise, make adjustments:
+
+- /*
+- If not the first time through, we either have a
+- gap due to foreign sbrk or a non-contiguous region. Insert a
+- double fencepost at old_top to prevent consolidation with space
+- we don't own. These fenceposts are artificial chunks that are
+- marked as inuse and are in any case too small to use. We need
+- two to make sizes and alignments work out.
+- */
+-
+- if (old_size != 0) {
+- /* Shrink old_top to insert fenceposts, keeping size a
+- multiple of MALLOC_ALIGNMENT. We know there is at least
+- enough space in old_top to do this.
+- */
+- old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK;
+- set_head(old_top, old_size | PREV_INUSE);
+-
+- /*
+- Note that the following assignments completely overwrite
+- old_top when old_size was previously MINSIZE. This is
+- intentional. We need the fencepost, even if old_top otherwise gets
+- lost.
+- */
+- chunk_at_offset(old_top, old_size )->size =
+- (sizeof(size_t))|PREV_INUSE;
+-
+- chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size =
+- (sizeof(size_t))|PREV_INUSE;
+-
+- /* If possible, release the rest, suppressing trimming. */
+- if (old_size >= MINSIZE) {
+- size_t tt = av->trim_threshold;
+- av->trim_threshold = (size_t)(-1);
+- free(chunk2mem(old_top));
+- av->trim_threshold = tt;
+- }
+- }
+- }
+- }
+-
+- /* Update statistics */
+- sum = av->sbrked_mem;
+- if (sum > (unsigned long)(av->max_sbrked_mem))
+- av->max_sbrked_mem = sum;
+-
+- sum += av->mmapped_mem;
+- if (sum > (unsigned long)(av->max_total_mem))
+- av->max_total_mem = sum;
+-
+- check_malloc_state();
+-
+- /* finally, do the allocation */
+-
+- p = av->top;
+- size = chunksize(p);
+-
+- /* check that one of the above allocation paths succeeded */
+- if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
+- remainder_size = size - nb;
+- remainder = chunk_at_offset(p, nb);
+- av->top = remainder;
+- set_head(p, nb | PREV_INUSE);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- check_malloced_chunk(p, nb);
+- return chunk2mem(p);
+- }
++ * If the first time through or noncontiguous, we need to call sbrk
++ just to find out where the end of memory lies.
++
++ * We need to ensure that all returned chunks from malloc will meet
++ MALLOC_ALIGNMENT
++
++ * If there was an intervening foreign sbrk, we need to adjust sbrk
++ request size to account for fact that we will not be able to
++ combine new space with existing space in old_top.
++
++ * Almost all systems internally allocate whole pages at a time, in
++ which case we might as well use the whole last page of request.
++ So we allocate enough more memory to hit a page boundary now,
++ which in turn causes future contiguous calls to page-align.
++ */
++
++ else {
++ front_misalign = 0;
++ end_misalign = 0;
++ correction = 0;
++ aligned_brk = brk;
++
++ /*
++ If MORECORE returns an address lower than we have seen before,
++ we know it isn't really contiguous. This and some subsequent
++ checks help cope with non-conforming MORECORE functions and
++ the presence of "foreign" calls to MORECORE from outside of
++ malloc or by other threads. We cannot guarantee to detect
++ these in all cases, but cope with the ones we do detect.
++ */
++ if (contiguous(av) && old_size != 0 && brk < old_end) {
++ set_noncontiguous(av);
++ }
++
++ /* handle contiguous cases */
++ if (contiguous(av)) {
++
++ /* We can tolerate forward non-contiguities here (usually due
++ to foreign calls) but treat them as part of our space for
++ stats reporting. */
++ if (old_size != 0)
++ av->sbrked_mem += brk - old_end;
++
++ /* Guarantee alignment of first new chunk made from this space */
++
++ front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
++ if (front_misalign > 0) {
++
++ /*
++ Skip over some bytes to arrive at an aligned position.
++ We don't need to specially mark these wasted front bytes.
++ They will never be accessed anyway because
++ prev_inuse of av->top (and any chunk created from its start)
++ is always true after initialization.
++ */
++
++ correction = MALLOC_ALIGNMENT - front_misalign;
++ aligned_brk += correction;
++ }
++
++ /*
++ If this isn't adjacent to existing space, then we will not
++ be able to merge with old_top space, so must add to 2nd request.
++ */
++
++ correction += old_size;
++
++ /* Extend the end address to hit a page boundary */
++ end_misalign = (size_t)(brk + size + correction);
++ correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
++
++ assert(correction >= 0);
++ snd_brk = (char*)(MORECORE(correction));
++
++ if (snd_brk == (char*)(MORECORE_FAILURE)) {
++ /*
++ If can't allocate correction, try to at least find out current
++ brk. It might be enough to proceed without failing.
++ */
++ correction = 0;
++ snd_brk = (char*)(MORECORE(0));
++ }
++ else if (snd_brk < brk) {
++ /*
++ If the second call gives noncontiguous space even though
++ it says it won't, the only course of action is to ignore
++ results of second call, and conservatively estimate where
++ the first call left us. Also set noncontiguous, so this
++ won't happen again, leaving at most one hole.
++
++ Note that this check is intrinsically incomplete. Because
++ MORECORE is allowed to give more space than we ask for,
++ there is no reliable way to detect a noncontiguity
++ producing a forward gap for the second call.
++ */
++ snd_brk = brk + size;
++ correction = 0;
++ set_noncontiguous(av);
++ }
++
++ }
++
++ /* handle non-contiguous cases */
++ else {
++ /* MORECORE/mmap must correctly align */
++ assert(aligned_OK(chunk2mem(brk)));
++
++ /* Find out current end of memory */
++ if (snd_brk == (char*)(MORECORE_FAILURE)) {
++ snd_brk = (char*)(MORECORE(0));
++ av->sbrked_mem += snd_brk - brk - size;
++ }
++ }
++
++ /* Adjust top based on results of second sbrk */
++ if (snd_brk != (char*)(MORECORE_FAILURE)) {
++ av->top = (mchunkptr)aligned_brk;
++ set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
++ av->sbrked_mem += correction;
++
++ /*
++ If not the first time through, we either have a
++ gap due to foreign sbrk or a non-contiguous region. Insert a
++ double fencepost at old_top to prevent consolidation with space
++ we don't own. These fenceposts are artificial chunks that are
++ marked as inuse and are in any case too small to use. We need
++ two to make sizes and alignments work out.
++ */
++
++ if (old_size != 0) {
++ /* Shrink old_top to insert fenceposts, keeping size a
++ multiple of MALLOC_ALIGNMENT. We know there is at least
++ enough space in old_top to do this.
++ */
++ old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK;
++ set_head(old_top, old_size | PREV_INUSE);
++
++ /*
++ Note that the following assignments completely overwrite
++ old_top when old_size was previously MINSIZE. This is
++ intentional. We need the fencepost, even if old_top otherwise gets
++ lost.
++ */
++ chunk_at_offset(old_top, old_size )->size =
++ (sizeof(size_t))|PREV_INUSE;
++
++ chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size =
++ (sizeof(size_t))|PREV_INUSE;
++
++ /* If possible, release the rest, suppressing trimming. */
++ if (old_size >= MINSIZE) {
++ size_t tt = av->trim_threshold;
++ av->trim_threshold = (size_t)(-1);
++ free(chunk2mem(old_top));
++ av->trim_threshold = tt;
++ }
++ }
++ }
++ }
++
++ /* Update statistics */
++ sum = av->sbrked_mem;
++ if (sum > (unsigned long)(av->max_sbrked_mem))
++ av->max_sbrked_mem = sum;
++
++ sum += av->mmapped_mem;
++ if (sum > (unsigned long)(av->max_total_mem))
++ av->max_total_mem = sum;
++
++ check_malloc_state();
++
++ /* finally, do the allocation */
++
++ p = av->top;
++ size = chunksize(p);
++
++ /* check that one of the above allocation paths succeeded */
++ if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
++ remainder_size = size - nb;
++ remainder = chunk_at_offset(p, nb);
++ av->top = remainder;
++ set_head(p, nb | PREV_INUSE);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ check_malloced_chunk(p, nb);
++ return chunk2mem(p);
++ }
+
+ }
+
+@@ -767,25 +764,25 @@ static int __malloc_largebin_index(unsig
+ #if defined(__GNUC__) && defined(i386)
+
+ __asm__("bsrl %1,%0\n\t"
+- : "=r" (m)
+- : "g" (x));
++ : "=r" (m)
++ : "g" (x));
+
+ #else
+ {
+- /*
+- Based on branch-free nlz algorithm in chapter 5 of Henry
+- S. Warren Jr's book "Hacker's Delight".
+- */
+-
+- unsigned int n = ((x - 0x100) >> 16) & 8;
+- x <<= n;
+- m = ((x - 0x1000) >> 16) & 4;
+- n += m;
+- x <<= m;
+- m = ((x - 0x4000) >> 16) & 2;
+- n += m;
+- x = (x << m) >> 14;
+- m = 13 - n + (x & ~(x>>1));
++ /*
++ Based on branch-free nlz algorithm in chapter 5 of Henry
++ S. Warren Jr's book "Hacker's Delight".
++ */
++
++ unsigned int n = ((x - 0x100) >> 16) & 8;
++ x <<= n;
++ m = ((x - 0x1000) >> 16) & 4;
++ n += m;
++ x <<= m;
++ m = ((x - 0x4000) >> 16) & 2;
++ n += m;
++ x = (x << m) >> 14;
++ m = 13 - n + (x & ~(x>>1));
+ }
+ #endif
+
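The branch-free fallback reindented above (from chapter 5 of Hacker's Delight) does with a handful of arithmetic operations what the i386 `bsrl` path does in one instruction: locate the highest set bit. The stand-alone sketch below shows the same idea in isolation, checked against a naive loop; it is illustrative only and is not the uClibc routine itself.

    #include <assert.h>
    #include <stdint.h>

    /* Branch-free index of the highest set bit of a non-zero 32-bit value. */
    static unsigned int bsr32(uint32_t x)
    {
        unsigned int r = 0, s;
        s = (x > 0xFFFFu) << 4; x >>= s; r |= s;
        s = (x > 0xFFu)   << 3; x >>= s; r |= s;
        s = (x > 0xFu)    << 2; x >>= s; r |= s;
        s = (x > 0x3u)    << 1; x >>= s; r |= s;
        r |= (unsigned int)(x >> 1);
        return r;
    }

    int main(void)
    {
        uint32_t x;
        for (x = 1; x < 100000; x++) {
            unsigned int loop = 0;      /* reference: shift until empty */
            uint32_t t = x;
            while (t >>= 1)
                loop++;
            assert(bsr32(x) == loop);
        }
        return 0;
    }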
+@@ -826,69 +823,70 @@ void* malloc(size_t bytes)
+ mchunkptr fwd; /* misc temp for linking */
+ mchunkptr bck; /* misc temp for linking */
+ void * sysmem;
++ void * retval;
+
+ #if !defined(__MALLOC_GLIBC_COMPAT__)
+ if (!bytes) return NULL;
+ #endif
+
+- LOCK;
++ __MALLOC_LOCK;
+ av = get_malloc_state();
+ /*
+- Convert request size to internal form by adding (sizeof(size_t)) bytes
+- overhead plus possibly more to obtain necessary alignment and/or
+- to obtain a size of at least MINSIZE, the smallest allocatable
+- size. Also, checked_request2size traps (returning 0) request sizes
+- that are so large that they wrap around zero when padded and
+- aligned.
+- */
++ Convert request size to internal form by adding (sizeof(size_t)) bytes
++ overhead plus possibly more to obtain necessary alignment and/or
++ to obtain a size of at least MINSIZE, the smallest allocatable
++ size. Also, checked_request2size traps (returning 0) request sizes
++ that are so large that they wrap around zero when padded and
++ aligned.
++ */
+
+ checked_request2size(bytes, nb);
+
+ /*
+- Bypass search if no frees yet
+- */
++ Bypass search if no frees yet
++ */
+ if (!have_anychunks(av)) {
+- if (av->max_fast == 0) /* initialization check */
+- __malloc_consolidate(av);
+- goto use_top;
++ if (av->max_fast == 0) /* initialization check */
++ __malloc_consolidate(av);
++ goto use_top;
+ }
+
+ /*
+- If the size qualifies as a fastbin, first check corresponding bin.
+- */
++ If the size qualifies as a fastbin, first check corresponding bin.
++ */
+
+ if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
+- fb = &(av->fastbins[(fastbin_index(nb))]);
+- if ( (victim = *fb) != 0) {
+- *fb = victim->fd;
+- check_remalloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
++ fb = &(av->fastbins[(fastbin_index(nb))]);
++ if ( (victim = *fb) != 0) {
++ *fb = victim->fd;
++ check_remalloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
+ }
+
+ /*
+- If a small request, check regular bin. Since these "smallbins"
+- hold one size each, no searching within bins is necessary.
+- (For a large request, we need to wait until unsorted chunks are
+- processed to find best fit. But for small ones, fits are exact
+- anyway, so we can check now, which is faster.)
+- */
++ If a small request, check regular bin. Since these "smallbins"
++ hold one size each, no searching within bins is necessary.
++ (For a large request, we need to wait until unsorted chunks are
++ processed to find best fit. But for small ones, fits are exact
++ anyway, so we can check now, which is faster.)
++ */
+
+ if (in_smallbin_range(nb)) {
+- idx = smallbin_index(nb);
+- bin = bin_at(av,idx);
++ idx = smallbin_index(nb);
++ bin = bin_at(av,idx);
+
+- if ( (victim = last(bin)) != bin) {
+- bck = victim->bk;
+- set_inuse_bit_at_offset(victim, nb);
+- bin->bk = bck;
+- bck->fd = bin;
+-
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
++ if ( (victim = last(bin)) != bin) {
++ bck = victim->bk;
++ set_inuse_bit_at_offset(victim, nb);
++ bin->bk = bck;
++ bck->fd = bin;
++
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
+ }
+
+ /* If this is a large request, consolidate fastbins before continuing.
+@@ -899,154 +897,154 @@ void* malloc(size_t bytes)
+ large requests, but less often mixtures, so consolidation is not
+ invoked all that often in most programs. And the programs that
+ it is called frequently in otherwise tend to fragment.
+- */
++ */
+
+ else {
+- idx = __malloc_largebin_index(nb);
+- if (have_fastchunks(av))
+- __malloc_consolidate(av);
++ idx = __malloc_largebin_index(nb);
++ if (have_fastchunks(av))
++ __malloc_consolidate(av);
+ }
+
+ /*
+- Process recently freed or remaindered chunks, taking one only if
+- it is exact fit, or, if this a small request, the chunk is remainder from
+- the most recent non-exact fit. Place other traversed chunks in
+- bins. Note that this step is the only place in any routine where
+- chunks are placed in bins.
+- */
++ Process recently freed or remaindered chunks, taking one only if
++     it is an exact fit, or, if this is a small request, the chunk is the remainder from
++ the most recent non-exact fit. Place other traversed chunks in
++ bins. Note that this step is the only place in any routine where
++ chunks are placed in bins.
++ */
+
+ while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
+- bck = victim->bk;
+- size = chunksize(victim);
++ bck = victim->bk;
++ size = chunksize(victim);
++
++ /* If a small request, try to use last remainder if it is the
++ only chunk in unsorted bin. This helps promote locality for
++ runs of consecutive small requests. This is the only
++ exception to best-fit, and applies only when there is
++ no exact fit for a small chunk.
++ */
++
++ if (in_smallbin_range(nb) &&
++ bck == unsorted_chunks(av) &&
++ victim == av->last_remainder &&
++ (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
++
++ /* split and reattach remainder */
++ remainder_size = size - nb;
++ remainder = chunk_at_offset(victim, nb);
++ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
++ av->last_remainder = remainder;
++ remainder->bk = remainder->fd = unsorted_chunks(av);
++
++ set_head(victim, nb | PREV_INUSE);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ set_foot(remainder, remainder_size);
++
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++
++ /* remove from unsorted list */
++ unsorted_chunks(av)->bk = bck;
++ bck->fd = unsorted_chunks(av);
++
++ /* Take now instead of binning if exact fit */
++
++ if (size == nb) {
++ set_inuse_bit_at_offset(victim, size);
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++
++ /* place chunk in bin */
+
+- /* If a small request, try to use last remainder if it is the
+- only chunk in unsorted bin. This helps promote locality for
+- runs of consecutive small requests. This is the only
+- exception to best-fit, and applies only when there is
+- no exact fit for a small chunk.
+- */
+-
+- if (in_smallbin_range(nb) &&
+- bck == unsorted_chunks(av) &&
+- victim == av->last_remainder &&
+- (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
+-
+- /* split and reattach remainder */
+- remainder_size = size - nb;
+- remainder = chunk_at_offset(victim, nb);
+- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+- av->last_remainder = remainder;
+- remainder->bk = remainder->fd = unsorted_chunks(av);
+-
+- set_head(victim, nb | PREV_INUSE);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- set_foot(remainder, remainder_size);
+-
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
+-
+- /* remove from unsorted list */
+- unsorted_chunks(av)->bk = bck;
+- bck->fd = unsorted_chunks(av);
+-
+- /* Take now instead of binning if exact fit */
+-
+- if (size == nb) {
+- set_inuse_bit_at_offset(victim, size);
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
+-
+- /* place chunk in bin */
+-
+- if (in_smallbin_range(size)) {
+- victim_index = smallbin_index(size);
+- bck = bin_at(av, victim_index);
+- fwd = bck->fd;
+- }
+- else {
+- victim_index = __malloc_largebin_index(size);
+- bck = bin_at(av, victim_index);
+- fwd = bck->fd;
+-
+- if (fwd != bck) {
+- /* if smaller than smallest, place first */
+- if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
+- fwd = bck;
+- bck = bck->bk;
+- }
+- else if ((unsigned long)(size) >=
+- (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
+-
+- /* maintain large bins in sorted order */
+- size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
+- while ((unsigned long)(size) < (unsigned long)(fwd->size))
+- fwd = fwd->fd;
+- bck = fwd->bk;
+- }
+- }
+- }
+-
+- mark_bin(av, victim_index);
+- victim->bk = bck;
+- victim->fd = fwd;
+- fwd->bk = victim;
+- bck->fd = victim;
++ if (in_smallbin_range(size)) {
++ victim_index = smallbin_index(size);
++ bck = bin_at(av, victim_index);
++ fwd = bck->fd;
++ }
++ else {
++ victim_index = __malloc_largebin_index(size);
++ bck = bin_at(av, victim_index);
++ fwd = bck->fd;
++
++ if (fwd != bck) {
++ /* if smaller than smallest, place first */
++ if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
++ fwd = bck;
++ bck = bck->bk;
++ }
++ else if ((unsigned long)(size) >=
++ (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
++
++ /* maintain large bins in sorted order */
++ size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
++ while ((unsigned long)(size) < (unsigned long)(fwd->size))
++ fwd = fwd->fd;
++ bck = fwd->bk;
++ }
++ }
++ }
++
++ mark_bin(av, victim_index);
++ victim->bk = bck;
++ victim->fd = fwd;
++ fwd->bk = victim;
++ bck->fd = victim;
+ }
+
+ /*
+- If a large request, scan through the chunks of current bin to
+- find one that fits. (This will be the smallest that fits unless
+- FIRST_SORTED_BIN_SIZE has been changed from default.) This is
+- the only step where an unbounded number of chunks might be
+- scanned without doing anything useful with them. However the
+- lists tend to be short.
+- */
++ If a large request, scan through the chunks of current bin to
++ find one that fits. (This will be the smallest that fits unless
++ FIRST_SORTED_BIN_SIZE has been changed from default.) This is
++ the only step where an unbounded number of chunks might be
++ scanned without doing anything useful with them. However the
++ lists tend to be short.
++ */
+
+ if (!in_smallbin_range(nb)) {
+- bin = bin_at(av, idx);
+-
+- for (victim = last(bin); victim != bin; victim = victim->bk) {
+- size = chunksize(victim);
++ bin = bin_at(av, idx);
+
+- if ((unsigned long)(size) >= (unsigned long)(nb)) {
+- remainder_size = size - nb;
+- unlink(victim, bck, fwd);
++ for (victim = last(bin); victim != bin; victim = victim->bk) {
++ size = chunksize(victim);
+
+- /* Exhaust */
+- if (remainder_size < MINSIZE) {
+- set_inuse_bit_at_offset(victim, size);
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
+- /* Split */
+- else {
+- remainder = chunk_at_offset(victim, nb);
+- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+- remainder->bk = remainder->fd = unsorted_chunks(av);
+- set_head(victim, nb | PREV_INUSE);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- set_foot(remainder, remainder_size);
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
++ if ((unsigned long)(size) >= (unsigned long)(nb)) {
++ remainder_size = size - nb;
++ unlink(victim, bck, fwd);
++
++ /* Exhaust */
++ if (remainder_size < MINSIZE) {
++ set_inuse_bit_at_offset(victim, size);
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++ /* Split */
++ else {
++ remainder = chunk_at_offset(victim, nb);
++ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
++ remainder->bk = remainder->fd = unsorted_chunks(av);
++ set_head(victim, nb | PREV_INUSE);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ set_foot(remainder, remainder_size);
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++ }
+ }
+- }
+- }
+ }
+
+ /*
+- Search for a chunk by scanning bins, starting with next largest
+- bin. This search is strictly by best-fit; i.e., the smallest
+- (with ties going to approximately the least recently used) chunk
+- that fits is selected.
++ Search for a chunk by scanning bins, starting with next largest
++ bin. This search is strictly by best-fit; i.e., the smallest
++ (with ties going to approximately the least recently used) chunk
++ that fits is selected.
+
+- The bitmap avoids needing to check that most blocks are nonempty.
+- */
++ The bitmap avoids needing to check that most blocks are nonempty.
++ */
+
+ ++idx;
+ bin = bin_at(av,idx);
+@@ -1056,109 +1054,111 @@ void* malloc(size_t bytes)
+
+ for (;;) {
+
+- /* Skip rest of block if there are no more set bits in this block. */
+- if (bit > map || bit == 0) {
+- do {
+- if (++block >= BINMAPSIZE) /* out of bins */
+- goto use_top;
+- } while ( (map = av->binmap[block]) == 0);
+-
+- bin = bin_at(av, (block << BINMAPSHIFT));
+- bit = 1;
+- }
+-
+- /* Advance to bin with set bit. There must be one. */
+- while ((bit & map) == 0) {
+- bin = next_bin(bin);
+- bit <<= 1;
+- assert(bit != 0);
+- }
+-
+- /* Inspect the bin. It is likely to be non-empty */
+- victim = last(bin);
+-
+- /* If a false alarm (empty bin), clear the bit. */
+- if (victim == bin) {
+- av->binmap[block] = map &= ~bit; /* Write through */
+- bin = next_bin(bin);
+- bit <<= 1;
+- }
+-
+- else {
+- size = chunksize(victim);
+-
+- /* We know the first chunk in this bin is big enough to use. */
+- assert((unsigned long)(size) >= (unsigned long)(nb));
+-
+- remainder_size = size - nb;
+-
+- /* unlink */
+- bck = victim->bk;
+- bin->bk = bck;
+- bck->fd = bin;
+-
+- /* Exhaust */
+- if (remainder_size < MINSIZE) {
+- set_inuse_bit_at_offset(victim, size);
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
++ /* Skip rest of block if there are no more set bits in this block. */
++ if (bit > map || bit == 0) {
++ do {
++ if (++block >= BINMAPSIZE) /* out of bins */
++ goto use_top;
++ } while ( (map = av->binmap[block]) == 0);
+
+- /* Split */
+- else {
+- remainder = chunk_at_offset(victim, nb);
++ bin = bin_at(av, (block << BINMAPSHIFT));
++ bit = 1;
++ }
+
+- unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
+- remainder->bk = remainder->fd = unsorted_chunks(av);
+- /* advertise as last remainder */
+- if (in_smallbin_range(nb))
+- av->last_remainder = remainder;
++ /* Advance to bin with set bit. There must be one. */
++ while ((bit & map) == 0) {
++ bin = next_bin(bin);
++ bit <<= 1;
++ assert(bit != 0);
++ }
+
+- set_head(victim, nb | PREV_INUSE);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- set_foot(remainder, remainder_size);
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
+- }
+- }
++ /* Inspect the bin. It is likely to be non-empty */
++ victim = last(bin);
++
++ /* If a false alarm (empty bin), clear the bit. */
++ if (victim == bin) {
++ av->binmap[block] = map &= ~bit; /* Write through */
++ bin = next_bin(bin);
++ bit <<= 1;
++ }
++
++ else {
++ size = chunksize(victim);
++
++ /* We know the first chunk in this bin is big enough to use. */
++ assert((unsigned long)(size) >= (unsigned long)(nb));
++
++ remainder_size = size - nb;
++
++ /* unlink */
++ bck = victim->bk;
++ bin->bk = bck;
++ bck->fd = bin;
++
++ /* Exhaust */
++ if (remainder_size < MINSIZE) {
++ set_inuse_bit_at_offset(victim, size);
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++
++ /* Split */
++ else {
++ remainder = chunk_at_offset(victim, nb);
++
++ unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
++ remainder->bk = remainder->fd = unsorted_chunks(av);
++ /* advertise as last remainder */
++ if (in_smallbin_range(nb))
++ av->last_remainder = remainder;
++
++ set_head(victim, nb | PREV_INUSE);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ set_foot(remainder, remainder_size);
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
++ }
++ }
+ }
+
+-use_top:
++ use_top:
+ /*
+- If large enough, split off the chunk bordering the end of memory
+- (held in av->top). Note that this is in accord with the best-fit
+- search rule. In effect, av->top is treated as larger (and thus
+- less well fitting) than any other available chunk since it can
+- be extended to be as large as necessary (up to system
+- limitations).
+-
+- We require that av->top always exists (i.e., has size >=
+- MINSIZE) after initialization, so if it would otherwise be
+- exhuasted by current request, it is replenished. (The main
+- reason for ensuring it exists is that we may need MINSIZE space
+- to put in fenceposts in sysmalloc.)
+- */
++ If large enough, split off the chunk bordering the end of memory
++ (held in av->top). Note that this is in accord with the best-fit
++ search rule. In effect, av->top is treated as larger (and thus
++ less well fitting) than any other available chunk since it can
++ be extended to be as large as necessary (up to system
++ limitations).
++
++ We require that av->top always exists (i.e., has size >=
++ MINSIZE) after initialization, so if it would otherwise be
++      exhausted by current request, it is replenished. (The main
++ reason for ensuring it exists is that we may need MINSIZE space
++ to put in fenceposts in sysmalloc.)
++ */
+
+ victim = av->top;
+ size = chunksize(victim);
+
+ if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
+- remainder_size = size - nb;
+- remainder = chunk_at_offset(victim, nb);
+- av->top = remainder;
+- set_head(victim, nb | PREV_INUSE);
+- set_head(remainder, remainder_size | PREV_INUSE);
+-
+- check_malloced_chunk(victim, nb);
+- UNLOCK;
+- return chunk2mem(victim);
++ remainder_size = size - nb;
++ remainder = chunk_at_offset(victim, nb);
++ av->top = remainder;
++ set_head(victim, nb | PREV_INUSE);
++ set_head(remainder, remainder_size | PREV_INUSE);
++
++ check_malloced_chunk(victim, nb);
++ retval = chunk2mem(victim);
++ goto DONE;
+ }
+
+ /* If no space in top, relay to handle system-dependent cases */
+ sysmem = __malloc_alloc(nb, av);
+- UNLOCK;
+- return sysmem;
++ retval = sysmem;
++ DONE:
++ __MALLOC_UNLOCK;
++ return retval;
+ }
+
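Every `UNLOCK; return ...;` pair in malloc() above becomes `retval = ...; goto DONE;`, so the function ends with exactly one __MALLOC_UNLOCK. That single exit point is what allows the lock macros to expand into a matched cancellation cleanup push/pop later in this patch. A minimal sketch of the shape with a plain pthread mutex (names are illustrative, not the uClibc macros):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every exit path funnels through DONE, so the mutex is released
     * exactly once no matter which branch produced the result. */
    static void *demo_alloc(size_t bytes)
    {
        void *retval = NULL;

        pthread_mutex_lock(&demo_lock);
        if (bytes == 0)
            goto DONE;              /* early failure still unlocks */
        retval = malloc(bytes);     /* success path exits the same way */
    DONE:
        pthread_mutex_unlock(&demo_lock);
        return retval;
    }

    int main(void)
    {
        free(demo_alloc(16));
        return 0;
    }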
+diff --git a/libc/stdlib/malloc-standard/malloc.h b/libc/stdlib/malloc-standard/malloc.h
+index fbc1492..14a0dd9 100644
+--- a/libc/stdlib/malloc-standard/malloc.h
++++ b/libc/stdlib/malloc-standard/malloc.h
+@@ -22,16 +22,12 @@
+ #include <malloc.h>
+ #include <stdlib.h>
+
++#include <bits/uClibc_mutex.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-extern pthread_mutex_t __malloc_lock;
+-# define LOCK __pthread_mutex_lock(&__malloc_lock)
+-# define UNLOCK __pthread_mutex_unlock(&__malloc_lock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++__UCLIBC_MUTEX_EXTERN(__malloc_lock);
++
++#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
++#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
+
+
+
+diff --git a/libc/stdlib/malloc-standard/mallopt.c b/libc/stdlib/malloc-standard/mallopt.c
+index e287920..41aa614 100644
+--- a/libc/stdlib/malloc-standard/mallopt.c
++++ b/libc/stdlib/malloc-standard/mallopt.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -25,40 +25,40 @@ int mallopt(int param_number, int value)
+
+ ret = 0;
+
+- LOCK;
++ __MALLOC_LOCK;
+ av = get_malloc_state();
+ /* Ensure initialization/consolidation */
+ __malloc_consolidate(av);
+
+ switch(param_number) {
+- case M_MXFAST:
+- if (value >= 0 && value <= MAX_FAST_SIZE) {
+- set_max_fast(av, value);
+- ret = 1;
+- }
+- break;
+-
+- case M_TRIM_THRESHOLD:
+- av->trim_threshold = value;
+- ret = 1;
+- break;
+-
+- case M_TOP_PAD:
+- av->top_pad = value;
+- ret = 1;
+- break;
+-
+- case M_MMAP_THRESHOLD:
+- av->mmap_threshold = value;
+- ret = 1;
+- break;
+-
+- case M_MMAP_MAX:
+- av->n_mmaps_max = value;
+- ret = 1;
+- break;
++ case M_MXFAST:
++ if (value >= 0 && value <= MAX_FAST_SIZE) {
++ set_max_fast(av, value);
++ ret = 1;
++ }
++ break;
++
++ case M_TRIM_THRESHOLD:
++ av->trim_threshold = value;
++ ret = 1;
++ break;
++
++ case M_TOP_PAD:
++ av->top_pad = value;
++ ret = 1;
++ break;
++
++ case M_MMAP_THRESHOLD:
++ av->mmap_threshold = value;
++ ret = 1;
++ break;
++
++ case M_MMAP_MAX:
++ av->n_mmaps_max = value;
++ ret = 1;
++ break;
+ }
+- UNLOCK;
++ __MALLOC_UNLOCK;
+ return ret;
+ }
+
+diff --git a/libc/stdlib/malloc-standard/memalign.c b/libc/stdlib/malloc-standard/memalign.c
+index bd95362..e78d752 100644
+--- a/libc/stdlib/malloc-standard/memalign.c
++++ b/libc/stdlib/malloc-standard/memalign.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -35,6 +35,7 @@ void* memalign(size_t alignment, size_t
+ mchunkptr remainder; /* spare room at end to split off */
+ unsigned long remainder_size; /* its size */
+ size_t size;
++ void *retval;
+
+ /* If need less alignment than we give anyway, just relay to malloc */
+
+@@ -46,12 +47,12 @@ void* memalign(size_t alignment, size_t
+
+ /* Make sure alignment is power of 2 (in case MINSIZE is not). */
+ if ((alignment & (alignment - 1)) != 0) {
+- size_t a = MALLOC_ALIGNMENT * 2;
+- while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
+- alignment = a;
++ size_t a = MALLOC_ALIGNMENT * 2;
++ while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
++ alignment = a;
+ }
+
+- LOCK;
++ __MALLOC_LOCK;
+ checked_request2size(bytes, nb);
+
+ /* Strategy: find a spot within that chunk that meets the alignment
+@@ -63,64 +64,67 @@ void* memalign(size_t alignment, size_t
+ m = (char*)(malloc(nb + alignment + MINSIZE));
+
+ if (m == 0) {
+- UNLOCK;
+- return 0; /* propagate failure */
++ retval = 0; /* propagate failure */
++ goto DONE;
+ }
+
+ p = mem2chunk(m);
+
+ if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
+
+- /*
+- Find an aligned spot inside chunk. Since we need to give back
+- leading space in a chunk of at least MINSIZE, if the first
+- calculation places us at a spot with less than MINSIZE leader,
+- we can move to the next aligned spot -- we've allocated enough
+- total room so that this is always possible.
+- */
+-
+- brk = (char*)mem2chunk((unsigned long)(((unsigned long)(m + alignment - 1)) &
+- -((signed long) alignment)));
+- if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
+- brk += alignment;
+-
+- newp = (mchunkptr)brk;
+- leadsize = brk - (char*)(p);
+- newsize = chunksize(p) - leadsize;
+-
+- /* For mmapped chunks, just adjust offset */
+- if (chunk_is_mmapped(p)) {
+- newp->prev_size = p->prev_size + leadsize;
+- set_head(newp, newsize|IS_MMAPPED);
+- UNLOCK;
+- return chunk2mem(newp);
+- }
+-
+- /* Otherwise, give back leader, use the rest */
+- set_head(newp, newsize | PREV_INUSE);
+- set_inuse_bit_at_offset(newp, newsize);
+- set_head_size(p, leadsize);
+- free(chunk2mem(p));
+- p = newp;
++ /*
++ Find an aligned spot inside chunk. Since we need to give back
++ leading space in a chunk of at least MINSIZE, if the first
++ calculation places us at a spot with less than MINSIZE leader,
++ we can move to the next aligned spot -- we've allocated enough
++ total room so that this is always possible.
++ */
++
++ brk = (char*)mem2chunk((unsigned long)(((unsigned long)(m + alignment - 1)) &
++ -((signed long) alignment)));
++ if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
++ brk += alignment;
++
++ newp = (mchunkptr)brk;
++ leadsize = brk - (char*)(p);
++ newsize = chunksize(p) - leadsize;
++
++ /* For mmapped chunks, just adjust offset */
++ if (chunk_is_mmapped(p)) {
++ newp->prev_size = p->prev_size + leadsize;
++ set_head(newp, newsize|IS_MMAPPED);
++ retval = chunk2mem(newp);
++ goto DONE;
++ }
++
++ /* Otherwise, give back leader, use the rest */
++ set_head(newp, newsize | PREV_INUSE);
++ set_inuse_bit_at_offset(newp, newsize);
++ set_head_size(p, leadsize);
++ free(chunk2mem(p));
++ p = newp;
+
+- assert (newsize >= nb &&
+- (((unsigned long)(chunk2mem(p))) % alignment) == 0);
++ assert (newsize >= nb &&
++ (((unsigned long)(chunk2mem(p))) % alignment) == 0);
+ }
+
+ /* Also give back spare room at the end */
+ if (!chunk_is_mmapped(p)) {
+- size = chunksize(p);
+- if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
+- remainder_size = size - nb;
+- remainder = chunk_at_offset(p, nb);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- set_head_size(p, nb);
+- free(chunk2mem(remainder));
+- }
++ size = chunksize(p);
++ if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
++ remainder_size = size - nb;
++ remainder = chunk_at_offset(p, nb);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ set_head_size(p, nb);
++ free(chunk2mem(remainder));
++ }
+ }
+
+ check_inuse_chunk(p);
+- UNLOCK;
+- return chunk2mem(p);
++ retval = chunk2mem(p);
++
++ DONE:
++ __MALLOC_UNLOCK;
++ return retval;
+ }
+
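memalign() above over-allocates by `alignment + MINSIZE` and then rounds the returned address up with `(m + alignment - 1) & -alignment`. For a power-of-two alignment that mask is the same as `~(alignment - 1)`; a stand-alone sketch of the arithmetic (illustrative only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Round p up to the next multiple of a power-of-two alignment a. */
    static uintptr_t align_up(uintptr_t p, uintptr_t a)
    {
        assert(a != 0 && (a & (a - 1)) == 0);    /* must be a power of two */
        return (p + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        printf("%lu\n", (unsigned long)align_up(1005, 64));   /* prints 1024 */
        printf("%lu\n", (unsigned long)align_up(1024, 64));   /* prints 1024 */
        return 0;
    }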
+diff --git a/libc/stdlib/malloc-standard/realloc.c b/libc/stdlib/malloc-standard/realloc.c
+index 1950130..9ca4b26 100644
+--- a/libc/stdlib/malloc-standard/realloc.c
++++ b/libc/stdlib/malloc-standard/realloc.c
+@@ -8,7 +8,7 @@
+ VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+
+ Note: There may be an updated version of this malloc obtainable at
+- ftp://gee.cs.oswego.edu/pub/misc/malloc.c
++ ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+ Check before installing!
+
+ Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
+@@ -23,14 +23,14 @@ void* realloc(void* oldmem, size_t bytes
+ {
+ mstate av;
+
+- size_t nb; /* padded request size */
++ size_t nb; /* padded request size */
+
+ mchunkptr oldp; /* chunk corresponding to oldmem */
+- size_t oldsize; /* its size */
++ size_t oldsize; /* its size */
+
+ mchunkptr newp; /* chunk to return */
+- size_t newsize; /* its size */
+- void* newmem; /* corresponding user mem */
++ size_t newsize; /* its size */
++ void* newmem; /* corresponding user mem */
+
+ mchunkptr next; /* next contiguous chunk after oldp */
+
+@@ -40,21 +40,23 @@ void* realloc(void* oldmem, size_t bytes
+ mchunkptr bck; /* misc temp for linking */
+ mchunkptr fwd; /* misc temp for linking */
+
+- unsigned long copysize; /* bytes to copy */
++ unsigned long copysize; /* bytes to copy */
+ unsigned int ncopies; /* size_t words to copy */
+- size_t* s; /* copy source */
+- size_t* d; /* copy destination */
++ size_t* s; /* copy source */
++ size_t* d; /* copy destination */
++
++ void *retval;
+
+
+ /* Check for special cases. */
+ if (! oldmem)
+- return malloc(bytes);
++ return malloc(bytes);
+ if (! bytes) {
+- free (oldmem);
+- return malloc(bytes);
++ free (oldmem);
++ return malloc(bytes);
+ }
+
+- LOCK;
++ __MALLOC_LOCK;
+ av = get_malloc_state();
+ checked_request2size(bytes, nb);
+
+@@ -65,173 +67,176 @@ void* realloc(void* oldmem, size_t bytes
+
+ if (!chunk_is_mmapped(oldp)) {
+
+- if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
+- /* already big enough; split below */
+- newp = oldp;
+- newsize = oldsize;
+- }
+-
+- else {
+- next = chunk_at_offset(oldp, oldsize);
+-
+- /* Try to expand forward into top */
+- if (next == av->top &&
+- (unsigned long)(newsize = oldsize + chunksize(next)) >=
+- (unsigned long)(nb + MINSIZE)) {
+- set_head_size(oldp, nb);
+- av->top = chunk_at_offset(oldp, nb);
+- set_head(av->top, (newsize - nb) | PREV_INUSE);
+- UNLOCK;
+- return chunk2mem(oldp);
+- }
+-
+- /* Try to expand forward into next chunk; split off remainder below */
+- else if (next != av->top &&
+- !inuse(next) &&
+- (unsigned long)(newsize = oldsize + chunksize(next)) >=
+- (unsigned long)(nb)) {
+- newp = oldp;
+- unlink(next, bck, fwd);
+- }
+-
+- /* allocate, copy, free */
+- else {
+- newmem = malloc(nb - MALLOC_ALIGN_MASK);
+- if (newmem == 0) {
+- UNLOCK;
+- return 0; /* propagate failure */
+- }
+-
+- newp = mem2chunk(newmem);
+- newsize = chunksize(newp);
+-
+- /*
+- Avoid copy if newp is next chunk after oldp.
+- */
+- if (newp == next) {
+- newsize += oldsize;
+- newp = oldp;
++ if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
++ /* already big enough; split below */
++ newp = oldp;
++ newsize = oldsize;
+ }
++
+ else {
+- /*
+- Unroll copy of <= 36 bytes (72 if 8byte sizes)
+- We know that contents have an odd number of
+- size_t-sized words; minimally 3.
+- */
+-
+- copysize = oldsize - (sizeof(size_t));
+- s = (size_t*)(oldmem);
+- d = (size_t*)(newmem);
+- ncopies = copysize / sizeof(size_t);
+- assert(ncopies >= 3);
+-
+- if (ncopies > 9)
+- memcpy(d, s, copysize);
+-
+- else {
+- *(d+0) = *(s+0);
+- *(d+1) = *(s+1);
+- *(d+2) = *(s+2);
+- if (ncopies > 4) {
+- *(d+3) = *(s+3);
+- *(d+4) = *(s+4);
+- if (ncopies > 6) {
+- *(d+5) = *(s+5);
+- *(d+6) = *(s+6);
+- if (ncopies > 8) {
+- *(d+7) = *(s+7);
+- *(d+8) = *(s+8);
++ next = chunk_at_offset(oldp, oldsize);
++
++ /* Try to expand forward into top */
++ if (next == av->top &&
++ (unsigned long)(newsize = oldsize + chunksize(next)) >=
++ (unsigned long)(nb + MINSIZE)) {
++ set_head_size(oldp, nb);
++ av->top = chunk_at_offset(oldp, nb);
++ set_head(av->top, (newsize - nb) | PREV_INUSE);
++ retval = chunk2mem(oldp);
++ goto DONE;
++ }
++
++ /* Try to expand forward into next chunk; split off remainder below */
++ else if (next != av->top &&
++ !inuse(next) &&
++ (unsigned long)(newsize = oldsize + chunksize(next)) >=
++ (unsigned long)(nb)) {
++ newp = oldp;
++ unlink(next, bck, fwd);
++ }
++
++ /* allocate, copy, free */
++ else {
++ newmem = malloc(nb - MALLOC_ALIGN_MASK);
++ if (newmem == 0) {
++ retval = 0; /* propagate failure */
++ goto DONE;
++ }
++
++ newp = mem2chunk(newmem);
++ newsize = chunksize(newp);
++
++ /*
++ Avoid copy if newp is next chunk after oldp.
++ */
++ if (newp == next) {
++ newsize += oldsize;
++ newp = oldp;
++ }
++ else {
++ /*
++ Unroll copy of <= 36 bytes (72 if 8byte sizes)
++ We know that contents have an odd number of
++ size_t-sized words; minimally 3.
++ */
++
++ copysize = oldsize - (sizeof(size_t));
++ s = (size_t*)(oldmem);
++ d = (size_t*)(newmem);
++ ncopies = copysize / sizeof(size_t);
++ assert(ncopies >= 3);
++
++ if (ncopies > 9)
++ memcpy(d, s, copysize);
++
++ else {
++ *(d+0) = *(s+0);
++ *(d+1) = *(s+1);
++ *(d+2) = *(s+2);
++ if (ncopies > 4) {
++ *(d+3) = *(s+3);
++ *(d+4) = *(s+4);
++ if (ncopies > 6) {
++ *(d+5) = *(s+5);
++ *(d+6) = *(s+6);
++ if (ncopies > 8) {
++ *(d+7) = *(s+7);
++ *(d+8) = *(s+8);
++ }
++ }
++ }
++ }
++
++ free(oldmem);
++ check_inuse_chunk(newp);
++ retval = chunk2mem(newp);
++ goto DONE;
+ }
+- }
+ }
+- }
++ }
++
++ /* If possible, free extra space in old or extended chunk */
++
++ assert((unsigned long)(newsize) >= (unsigned long)(nb));
++
++ remainder_size = newsize - nb;
+
+- free(oldmem);
+- check_inuse_chunk(newp);
+- UNLOCK;
+- return chunk2mem(newp);
+- }
+- }
+- }
+-
+- /* If possible, free extra space in old or extended chunk */
+-
+- assert((unsigned long)(newsize) >= (unsigned long)(nb));
+-
+- remainder_size = newsize - nb;
+-
+- if (remainder_size < MINSIZE) { /* not enough extra to split off */
+- set_head_size(newp, newsize);
+- set_inuse_bit_at_offset(newp, newsize);
+- }
+- else { /* split remainder */
+- remainder = chunk_at_offset(newp, nb);
+- set_head_size(newp, nb);
+- set_head(remainder, remainder_size | PREV_INUSE);
+- /* Mark remainder as inuse so free() won't complain */
+- set_inuse_bit_at_offset(remainder, remainder_size);
+- free(chunk2mem(remainder));
+- }
+-
+- check_inuse_chunk(newp);
+- UNLOCK;
+- return chunk2mem(newp);
++ if (remainder_size < MINSIZE) { /* not enough extra to split off */
++ set_head_size(newp, newsize);
++ set_inuse_bit_at_offset(newp, newsize);
++ }
++ else { /* split remainder */
++ remainder = chunk_at_offset(newp, nb);
++ set_head_size(newp, nb);
++ set_head(remainder, remainder_size | PREV_INUSE);
++ /* Mark remainder as inuse so free() won't complain */
++ set_inuse_bit_at_offset(remainder, remainder_size);
++ free(chunk2mem(remainder));
++ }
++
++ check_inuse_chunk(newp);
++ retval = chunk2mem(newp);
++ goto DONE;
+ }
+
+ /*
+- Handle mmap cases
+- */
++ Handle mmap cases
++ */
+
+ else {
+- size_t offset = oldp->prev_size;
+- size_t pagemask = av->pagesize - 1;
+- char *cp;
+- unsigned long sum;
+-
+- /* Note the extra (sizeof(size_t)) overhead */
+- newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
+-
+- /* don't need to remap if still within same page */
+- if (oldsize == newsize - offset) {
+- UNLOCK;
+- return oldmem;
+- }
+-
+- cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
+-
+- if (cp != (char*)MORECORE_FAILURE) {
+-
+- newp = (mchunkptr)(cp + offset);
+- set_head(newp, (newsize - offset)|IS_MMAPPED);
+-
+- assert(aligned_OK(chunk2mem(newp)));
+- assert((newp->prev_size == offset));
+-
+- /* update statistics */
+- sum = av->mmapped_mem += newsize - oldsize;
+- if (sum > (unsigned long)(av->max_mmapped_mem))
+- av->max_mmapped_mem = sum;
+- sum += av->sbrked_mem;
+- if (sum > (unsigned long)(av->max_total_mem))
+- av->max_total_mem = sum;
+-
+- UNLOCK;
+- return chunk2mem(newp);
+- }
+-
+- /* Note the extra (sizeof(size_t)) overhead. */
+- if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
+- newmem = oldmem; /* do nothing */
+- else {
+- /* Must alloc, copy, free. */
+- newmem = malloc(nb - MALLOC_ALIGN_MASK);
+- if (newmem != 0) {
+- memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
+- free(oldmem);
+- }
+- }
+- UNLOCK;
+- return newmem;
++ size_t offset = oldp->prev_size;
++ size_t pagemask = av->pagesize - 1;
++ char *cp;
++ unsigned long sum;
++
++ /* Note the extra (sizeof(size_t)) overhead */
++ newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
++
++ /* don't need to remap if still within same page */
++ if (oldsize == newsize - offset) {
++ retval = oldmem;
++ goto DONE;
++ }
++
++ cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
++
++ if (cp != (char*)MORECORE_FAILURE) {
++
++ newp = (mchunkptr)(cp + offset);
++ set_head(newp, (newsize - offset)|IS_MMAPPED);
++
++ assert(aligned_OK(chunk2mem(newp)));
++ assert((newp->prev_size == offset));
++
++ /* update statistics */
++ sum = av->mmapped_mem += newsize - oldsize;
++ if (sum > (unsigned long)(av->max_mmapped_mem))
++ av->max_mmapped_mem = sum;
++ sum += av->sbrked_mem;
++ if (sum > (unsigned long)(av->max_total_mem))
++ av->max_total_mem = sum;
++
++ retval = chunk2mem(newp);
++ goto DONE;
++ }
++
++ /* Note the extra (sizeof(size_t)) overhead. */
++ if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
++ newmem = oldmem; /* do nothing */
++ else {
++ /* Must alloc, copy, free. */
++ newmem = malloc(nb - MALLOC_ALIGN_MASK);
++ if (newmem != 0) {
++ memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
++ free(oldmem);
++ }
++ }
++ retval = newmem;
+ }
++
++ DONE:
++ __MALLOC_UNLOCK;
++ return retval;
+ }
+
+diff --git a/libc/stdlib/random.c b/libc/stdlib/random.c
+index b0a00e1..1bd63bc 100644
+--- a/libc/stdlib/random.c
++++ b/libc/stdlib/random.c
+@@ -27,16 +27,14 @@
+ #include <limits.h>
+ #include <stddef.h>
+ #include <stdlib.h>
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
++
+ /* POSIX.1c requires that there is mutual exclusion for the `rand' and
+ `srand' functions to prevent concurrent calls from modifying common
+ data. */
+-static pthread_mutex_t lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+-#else
+-#define __pthread_mutex_lock(x)
+-#define __pthread_mutex_unlock(x)
+-#endif
++
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
+
+ /* An improved random number generation package. In addition to the standard
+ rand()/srand() like interface, this package also has a special state info
+@@ -184,9 +182,9 @@ static struct random_data unsafe_state =
+ for default usage relies on values produced by this routine. */
+ void srandom (unsigned int x)
+ {
+- __pthread_mutex_lock(&lock);
++ __UCLIBC_MUTEX_LOCK(mylock);
+ srandom_r (x, &unsafe_state);
+- __pthread_mutex_unlock(&lock);
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ }
+ weak_alias (srandom, srand)
+
+@@ -205,10 +203,10 @@ char * initstate (unsigned int seed, cha
+ {
+ int32_t *ostate;
+
+- __pthread_mutex_lock(&lock);
++ __UCLIBC_MUTEX_LOCK(mylock);
+ ostate = &unsafe_state.state[-1];
+ initstate_r (seed, arg_state, n, &unsafe_state);
+- __pthread_mutex_unlock(&lock);
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return (char *) ostate;
+ }
+
+@@ -224,11 +222,11 @@ char * setstate (char *arg_state)
+ {
+ int32_t *ostate;
+
+- __pthread_mutex_lock(&lock);
++ __UCLIBC_MUTEX_LOCK(mylock);
+ ostate = &unsafe_state.state[-1];
+ if (setstate_r (arg_state, &unsafe_state) < 0)
+ ostate = NULL;
+- __pthread_mutex_unlock(&lock);
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return (char *) ostate;
+ }
+
+@@ -247,9 +245,9 @@ long int random ()
+ {
+ int32_t retval;
+
+- __pthread_mutex_lock(&lock);
++ __UCLIBC_MUTEX_LOCK(mylock);
+ random_r (&unsafe_state, &retval);
+- __pthread_mutex_unlock(&lock);
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return retval;
+ }
+
+diff --git a/libc/stdlib/setenv.c b/libc/stdlib/setenv.c
+index d0cfe52..2d899cc 100644
+--- a/libc/stdlib/setenv.c
++++ b/libc/stdlib/setenv.c
+@@ -17,7 +17,7 @@
+ 02111-1307 USA.
+
+ modified for uClibc by Erik Andersen <andersen@codepoet.org>
+- */
++*/
+
+ #define _GNU_SOURCE
+ #include <features.h>
+@@ -26,16 +26,9 @@
+ #include <string.h>
+ #include <unistd.h>
+
+-#ifdef __UCLIBC_HAS_THREADS__
+-#include <pthread.h>
+-static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
+-# define LOCK __pthread_mutex_lock(&mylock)
+-# define UNLOCK __pthread_mutex_unlock(&mylock);
+-#else
+-# define LOCK
+-# define UNLOCK
+-#endif
++#include <bits/uClibc_mutex.h>
+
++__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
+
+ /* If this variable is not a null pointer we allocated the current
+ environment. */
+@@ -49,14 +42,15 @@ static char **last_environ;
+ to reuse values once generated for a `setenv' call since we can never
+ free the strings. */
+ int __add_to_environ (const char *name, const char *value,
+- const char *combined, int replace)
++ const char *combined, int replace)
+ {
+ register char **ep;
+ register size_t size;
+ const size_t namelen = strlen (name);
+ const size_t vallen = value != NULL ? strlen (value) + 1 : 0;
++ int rv = -1;
+
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+
+ /* We have to get the pointer now that we have the lock and not earlier
+ since another thread might have created a new environment. */
+@@ -64,72 +58,72 @@ int __add_to_environ (const char *name,
+
+ size = 0;
+ if (ep != NULL) {
+- for (; *ep != NULL; ++ep) {
+- if (!strncmp (*ep, name, namelen) && (*ep)[namelen] == '=')
+- break;
+- else
+- ++size;
+- }
++ for (; *ep != NULL; ++ep) {
++ if (!strncmp (*ep, name, namelen) && (*ep)[namelen] == '=')
++ break;
++ else
++ ++size;
++ }
+ }
+
+ if (ep == NULL || *ep == NULL) {
+- char **new_environ;
++ char **new_environ;
+
+- /* We allocated this space; we can extend it. */
+- new_environ = (char **) realloc (last_environ,
+- (size + 2) * sizeof (char *));
+- if (new_environ == NULL) {
+- UNLOCK;
+- return -1;
+- }
+-
+- /* If the whole entry is given add it. */
+- if (combined != NULL) {
+- /* We must not add the string to the search tree since it belongs
+- to the user. */
+- new_environ[size] = (char *) combined;
+- } else {
+- /* See whether the value is already known. */
+- new_environ[size] = (char *) malloc (namelen + 1 + vallen);
+- if (new_environ[size] == NULL) {
+- __set_errno (ENOMEM);
+- UNLOCK;
+- return -1;
+- }
+-
+- memcpy (new_environ[size], name, namelen);
+- new_environ[size][namelen] = '=';
+- memcpy (&new_environ[size][namelen + 1], value, vallen);
+- }
+-
+- if (__environ != last_environ) {
+- memcpy ((char *) new_environ, (char *) __environ,
+- size * sizeof (char *));
+- }
++ /* We allocated this space; we can extend it. */
++ new_environ = (char **) realloc (last_environ,
++ (size + 2) * sizeof (char *));
++ if (new_environ == NULL) {
++ goto DONE;
++ }
++
++ /* If the whole entry is given add it. */
++ if (combined != NULL) {
++ /* We must not add the string to the search tree since it belongs
++ to the user. */
++ new_environ[size] = (char *) combined;
++ } else {
++ /* See whether the value is already known. */
++ new_environ[size] = (char *) malloc (namelen + 1 + vallen);
++ if (new_environ[size] == NULL) {
++ __set_errno (ENOMEM);
++ goto DONE;
++ }
++
++ memcpy (new_environ[size], name, namelen);
++ new_environ[size][namelen] = '=';
++ memcpy (&new_environ[size][namelen + 1], value, vallen);
++ }
++
++ if (__environ != last_environ) {
++ memcpy ((char *) new_environ, (char *) __environ,
++ size * sizeof (char *));
++ }
+
+- new_environ[size + 1] = NULL;
+- last_environ = __environ = new_environ;
++ new_environ[size + 1] = NULL;
++ last_environ = __environ = new_environ;
+ } else if (replace) {
+- char *np;
++ char *np;
+
+- /* Use the user string if given. */
+- if (combined != NULL) {
+- np = (char *) combined;
+- } else {
+- np = malloc (namelen + 1 + vallen);
+- if (np == NULL) {
+- UNLOCK;
+- return -1;
+- }
+- memcpy (np, name, namelen);
+- np[namelen] = '=';
+- memcpy (&np[namelen + 1], value, vallen);
+- }
+- *ep = np;
+- }
+-
+- UNLOCK;
+- return 0;
++ /* Use the user string if given. */
++ if (combined != NULL) {
++ np = (char *) combined;
++ } else {
++ np = malloc (namelen + 1 + vallen);
++ if (np == NULL) {
++ goto DONE;
++ }
++ memcpy (np, name, namelen);
++ np[namelen] = '=';
++ memcpy (&np[namelen + 1], value, vallen);
++ }
++ *ep = np;
++ }
++
++ rv = 0;
++
++ DONE:
++ __UCLIBC_MUTEX_UNLOCK(mylock);
++ return rv;
+ }
+
+ int setenv (const char *name, const char *value, int replace)
+@@ -143,26 +137,26 @@ int unsetenv (const char *name)
+ char **ep;
+
+ if (name == NULL || *name == '\0' || strchr (name, '=') != NULL) {
+- __set_errno (EINVAL);
+- return -1;
++ __set_errno (EINVAL);
++ return -1;
+ }
+
+ len = strlen (name);
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ ep = __environ;
+ while (*ep != NULL) {
+- if (!strncmp (*ep, name, len) && (*ep)[len] == '=') {
+- /* Found it. Remove this pointer by moving later ones back. */
+- char **dp = ep;
+- do {
+- dp[0] = dp[1];
+- } while (*dp++);
+- /* Continue the loop in case NAME appears again. */
+- } else {
+- ++ep;
+- }
++ if (!strncmp (*ep, name, len) && (*ep)[len] == '=') {
++ /* Found it. Remove this pointer by moving later ones back. */
++ char **dp = ep;
++ do {
++ dp[0] = dp[1];
++ } while (*dp++);
++ /* Continue the loop in case NAME appears again. */
++ } else {
++ ++ep;
++ }
+ }
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return 0;
+ }
+
+@@ -171,15 +165,15 @@ int unsetenv (const char *name)
+ for Fortran 77) requires this function. */
+ int clearenv (void)
+ {
+- LOCK;
++ __UCLIBC_MUTEX_LOCK(mylock);
+ if (__environ == last_environ && __environ != NULL) {
+- /* We allocated this environment so we can free it. */
+- free (__environ);
+- last_environ = NULL;
++ /* We allocated this environment so we can free it. */
++ free (__environ);
++ last_environ = NULL;
+ }
+ /* Clear the environment pointer removes the whole environment. */
+ __environ = NULL;
+- UNLOCK;
++ __UCLIBC_MUTEX_UNLOCK(mylock);
+ return 0;
+ }
+
+@@ -190,10 +184,10 @@ int putenv (char *string)
+ const char *const name_end = strchr (string, '=');
+
+ if (name_end != NULL) {
+- char *name = strndup(string, name_end - string);
+- result = __add_to_environ (name, NULL, string, 1);
+- free(name);
+- return(result);
++ char *name = strndup(string, name_end - string);
++ result = __add_to_environ (name, NULL, string, 1);
++ free(name);
++ return(result);
+ }
+ unsetenv (string);
+ return 0;
+diff --git a/libc/sysdeps/linux/common/bits/uClibc_stdio.h b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
+index 40cd5fe..3c6911e 100644
+--- a/libc/sysdeps/linux/common/bits/uClibc_stdio.h
++++ b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
+@@ -116,9 +116,7 @@
+ #endif
+
+ /**********************************************************************/
+-#ifdef __UCLIBC_HAS_THREADS__
+-/* Need this for pthread_mutex_t. */
+-#include <bits/pthreadtypes.h>
++#include <bits/uClibc_mutex.h>
+
+ /* user_locking
+ * 0 : do auto locking/unlocking
+@@ -132,43 +130,37 @@
+ * This way, we avoid calling the weak lock/unlock functions.
+ */
+
+-#define __STDIO_AUTO_THREADLOCK_VAR int __infunc_user_locking
+-
+-#define __STDIO_AUTO_THREADLOCK(__stream) \
+- if ((__infunc_user_locking = (__stream)->__user_locking) == 0) { \
+- __pthread_mutex_lock(&(__stream)->__lock); \
+- }
+-
+-#define __STDIO_AUTO_THREADUNLOCK(__stream) \
+- if (__infunc_user_locking == 0) { \
+- __pthread_mutex_unlock(&(__stream)->__lock); \
+- }
++#define __STDIO_AUTO_THREADLOCK_VAR \
++ __UCLIBC_MUTEX_AUTO_LOCK_VAR(__infunc_user_locking)
+
+-#define __STDIO_SET_USER_LOCKING(__stream) ((__stream)->__user_locking = 1)
++#define __STDIO_AUTO_THREADLOCK(__stream) \
++ __UCLIBC_MUTEX_AUTO_LOCK((__stream)->__lock, __infunc_user_locking, \
++ (__stream)->__user_locking)
+
+-#define __STDIO_ALWAYS_THREADLOCK(__stream) \
+- __pthread_mutex_lock(&(__stream)->__lock)
++#define __STDIO_AUTO_THREADUNLOCK(__stream) \
++ __UCLIBC_MUTEX_AUTO_UNLOCK((__stream)->__lock, __infunc_user_locking)
+
+-#define __STDIO_ALWAYS_THREADTRYLOCK(__stream) \
+- __pthread_mutex_trylock(&(__stream)->__lock)
++#define __STDIO_ALWAYS_THREADLOCK(__stream) \
++ __UCLIBC_MUTEX_LOCK((__stream)->__lock)
+
+-#define __STDIO_ALWAYS_THREADUNLOCK(__stream) \
+- __pthread_mutex_unlock(&(__stream)->__lock)
++#define __STDIO_ALWAYS_THREADUNLOCK(__stream) \
++ __UCLIBC_MUTEX_UNLOCK((__stream)->__lock)
+
+-#else /* __UCLIBC_HAS_THREADS__ */
++#define __STDIO_ALWAYS_THREADLOCK_CANCEL_UNSAFE(__stream) \
++ __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE((__stream)->__lock)
+
+-#define __STDIO_AUTO_THREADLOCK_VAR ((void)0)
++#define __STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(__stream) \
++ __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE((__stream)->__lock)
+
+-#define __STDIO_AUTO_THREADLOCK(__stream) ((void)0)
+-#define __STDIO_AUTO_THREADUNLOCK(__stream) ((void)0)
++#define __STDIO_ALWAYS_THREADUNLOCK_CANCEL_UNSAFE(__stream) \
++ __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE((__stream)->__lock)
+
++#ifdef __UCLIBC_HAS_THREADS__
++#define __STDIO_SET_USER_LOCKING(__stream) ((__stream)->__user_locking = 1)
++#else
+ #define __STDIO_SET_USER_LOCKING(__stream) ((void)0)
++#endif
+
+-#define __STDIO_ALWAYS_THREADLOCK(__stream) ((void)0)
+-#define __STDIO_ALWAYS_THREADTRYLOCK(__stream) (0) /* Always succeed. */
+-#define __STDIO_ALWAYS_THREADUNLOCK(__stream) ((void)0)
+-
+-#endif /* __UCLIBC_HAS_THREADS__ */
+ /**********************************************************************/
+
+ #define __STDIO_IOFBF 0 /* Fully buffered. */
+@@ -283,7 +275,7 @@ struct __STDIO_FILE_STRUCT {
+ #endif
+ #ifdef __UCLIBC_HAS_THREADS__
+ int __user_locking;
+- pthread_mutex_t __lock;
++ __UCLIBC_MUTEX(__lock);
+ #endif
+ /* Everything after this is unimplemented... and may be trashed. */
+ #if __STDIO_BUILTIN_BUF_SIZE > 0
+@@ -358,10 +350,14 @@ extern void _stdio_term(void);
+ extern struct __STDIO_FILE_STRUCT *_stdio_openlist;
+
+ #ifdef __UCLIBC_HAS_THREADS__
+-extern pthread_mutex_t _stdio_openlist_lock;
+-extern int _stdio_openlist_delflag;
++__UCLIBC_MUTEX_EXTERN(_stdio_openlist_add_lock);
++#ifdef __STDIO_BUFFERS
++__UCLIBC_MUTEX_EXTERN(_stdio_openlist_del_lock);
++extern volatile int _stdio_openlist_use_count; /* _stdio_openlist_del_lock */
++extern int _stdio_openlist_del_count; /* _stdio_openlist_del_lock */
++#endif
+ extern int _stdio_user_locking;
+-extern void __stdio_init_mutex(pthread_mutex_t *m);
++extern void __stdio_init_mutex(__UCLIBC_MUTEX_TYPE *m);
+ #endif
+
+ #endif
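The __STDIO_AUTO_THREADLOCK macros snapshot the stream's __user_locking flag once and take the per-stream mutex only when it is zero, i.e. when the caller has not already locked the stream with flockfile(). A rough stand-alone sketch of that behaviour with plain pthreads; the cancellation deferral added by the new __UCLIBC_MUTEX_* macros is deliberately omitted here:

    #include <pthread.h>

    /* Rough stand-in for a FILE with a per-stream lock (illustrative only). */
    struct demo_stream {
        int user_locking;            /* 1: caller holds the lock itself */
        pthread_mutex_t lock;
    };

    static void demo_putc(struct demo_stream *s, int c)
    {
        int auto_locking = (s->user_locking == 0);   /* captured once */

        if (auto_locking)
            pthread_mutex_lock(&s->lock);

        /* ... touch the stream's buffer here ... */
        (void)c;

        if (auto_locking)
            pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct demo_stream s = { 0, PTHREAD_MUTEX_INITIALIZER };
        demo_putc(&s, 'x');
        return 0;
    }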
+diff --git a/libc/sysdeps/linux/common/getdents.c b/libc/sysdeps/linux/common/getdents.c
+index ab6a276..23463e5 100644
+--- a/libc/sysdeps/linux/common/getdents.c
++++ b/libc/sysdeps/linux/common/getdents.c
+@@ -30,8 +30,6 @@
+ #include <sys/syscall.h>
+
+
+-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+-
+ struct kernel_dirent
+ {
+ long d_ino;
+diff --git a/libc/sysdeps/linux/common/sigprocmask.c b/libc/sysdeps/linux/common/sigprocmask.c
+index 70ff366..565318d 100644
+--- a/libc/sysdeps/linux/common/sigprocmask.c
++++ b/libc/sysdeps/linux/common/sigprocmask.c
+@@ -23,6 +23,8 @@ int sigprocmask(int how, const sigset_t
+ if (set &&
+ #if (SIG_BLOCK == 0) && (SIG_UNBLOCK == 1) && (SIG_SETMASK == 2)
+ (((unsigned int) how) > 2)
++#elif (SIG_BLOCK == 1) && (SIG_UNBLOCK == 2) && (SIG_SETMASK == 3)
++ (((unsigned int)(how-1)) > 2)
+ #else
+ #warning "compile time assumption violated.. slow path..."
+ ((how != SIG_BLOCK) && (how != SIG_UNBLOCK)
+@@ -48,6 +50,8 @@ int sigprocmask(int how, const sigset_t
+ if (set &&
+ #if (SIG_BLOCK == 0) && (SIG_UNBLOCK == 1) && (SIG_SETMASK == 2)
+ (((unsigned int) how) > 2)
++#elif (SIG_BLOCK == 1) && (SIG_UNBLOCK == 2) && (SIG_SETMASK == 3)
++ (((unsigned int)(how-1)) > 2)
+ #else
+ #warning "compile time assumption violated.. slow path..."
+ ((how != SIG_BLOCK) && (how != SIG_UNBLOCK)
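The added #elif handles targets where SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK are 1/2/3 rather than 0/1/2; either way a single unsigned comparison rejects every invalid `how`. A small sketch of the trick written symbolically (illustrative only; it assumes the three constants are consecutive and ascending, as in both branches above):

    #include <signal.h>
    #include <stdio.h>

    /* Valid iff how is one of the consecutive values SIG_BLOCK..SIG_SETMASK.
     * The subtraction is done in unsigned arithmetic, so values below
     * SIG_BLOCK wrap to a huge number and fail the test as well. */
    static int how_is_valid(int how)
    {
        return (unsigned int)(how - SIG_BLOCK) <= (SIG_SETMASK - SIG_BLOCK);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               how_is_valid(SIG_BLOCK),     /* 1 */
               how_is_valid(SIG_SETMASK),   /* 1 */
               how_is_valid(42));           /* 0 */
        return 0;
    }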
+diff --git a/libc/sysdeps/linux/mips/bits/kernel_sigaction.h b/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
+index b6f52cc..317e5b3 100644
+--- a/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
++++ b/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
+@@ -38,3 +38,6 @@ struct kernel_sigaction {
+ void (*sa_restorer)(void);
+ int s_resv[1]; /* reserved */
+ };
++
++extern int __syscall_rt_sigaction (int, const struct kernel_sigaction *__unbounded,
++ struct kernel_sigaction *__unbounded, size_t);
+diff --git a/libc/sysdeps/linux/mips/pipe.S b/libc/sysdeps/linux/mips/pipe.S
+index c3afae5..cd88074 100644
+--- a/libc/sysdeps/linux/mips/pipe.S
++++ b/libc/sysdeps/linux/mips/pipe.S
+@@ -7,25 +7,36 @@
+ #include <asm/unistd.h>
+ #include <asm/regdef.h>
+
+- .globl pipe
+- .ent pipe, 0
++ .globl pipe
++ .ent pipe, 0
+ pipe:
+- addiu sp,sp,-24
+- sw a0,16(sp)
+- li v0,__NR_pipe
+- syscall
+- beqz a3, 1f
+- la t3, errno
+- sw v0, (t3)
+- li v0, -1
+- b 2f
++ .frame sp, 24, sp
++#ifdef __PIC__
++ .set noreorder
++ .cpload $25
++ .set reorder
++ addiu sp,sp,-24
++ .cprestore 16
++#else
++ addiu sp,sp,-24
++#endif
++ sw a0,16(sp)
++ li v0,__NR_pipe
++ syscall
++ beqz a3, 1f
++#ifdef __PIC__
++ la t0, __syscall_error
++ jr t9
++#else
++ j __syscall_error
++#endif
+ 1:
+- lw a0, 16(sp)
+- sw v0, 0(a0)
+- sw v1, 4(a0)
+- li v0, 0
++ lw a0, 16(sp)
++ sw v0, 0(a0)
++ sw v1, 4(a0)
++ li v0, 0
+ 2:
+- addiu sp,sp,24
+- j ra
+- .end pipe
+- .size pipe,.-pipe
++ addiu sp,sp,24
++ j ra
++ .end pipe
++ .size pipe,.-pipe
+diff --git a/libcrypt/des.c b/libcrypt/des.c
+index 3b49a7a..f7a6be1 100644
+--- a/libcrypt/des.c
++++ b/libcrypt/des.c
+@@ -504,7 +504,7 @@ do_des( u_int32_t l_in, u_int32_t r_in,
+ kl = kl1;
+ kr = kr1;
+ round = 16;
+- while (round--) {
++ do {
+ /*
+ * Expand R to 48 bits (simulate the E-box).
+ */
+@@ -540,7 +540,7 @@ do_des( u_int32_t l_in, u_int32_t r_in,
+ f ^= l;
+ l = r;
+ r = f;
+- }
++ } while (--round);
+ r = l;
+ l = f;
+ }
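The des.c loop change swaps `while (round--)` for `do { ... } while (--round)`: with the round count known to be 16 the body runs the same number of times, but the do/while form drops the redundant first test. A tiny sketch confirming the equivalence (illustrative only):

    #include <assert.h>

    int main(void)
    {
        int round, a = 0, b = 0;

        round = 16;                     /* old form: test, then decrement */
        while (round--)
            a++;

        round = 16;                     /* new form: body first, then test */
        do {
            b++;
        } while (--round);

        assert(a == 16 && b == 16);     /* both run exactly 16 times */
        return 0;
    }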
+diff --git a/libpthread/linuxthreads/ptfork.c b/libpthread/linuxthreads/ptfork.c
+index eb544f3..cfec2b7 100644
+--- a/libpthread/linuxthreads/ptfork.c
++++ b/libpthread/linuxthreads/ptfork.c
+@@ -26,6 +26,15 @@
+ #include "pthread.h"
+ #include "internals.h"
+
++#warning hack alert... should be sufficient for system(), but what about other libc mutexes?
++#include <bits/uClibc_mutex.h>
++
++__UCLIBC_MUTEX_EXTERN(__malloc_lock);
++
++#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
++#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
++#warning hack alert block end
++
+ struct handler_list {
+ void (*handler)(void);
+ struct handler_list * next;
+@@ -91,9 +100,18 @@ pid_t __fork(void)
+ parent = pthread_atfork_parent;
+ pthread_mutex_unlock(&pthread_atfork_lock);
+ pthread_call_handlers(prepare);
++
++#warning hack alert
++ __MALLOC_LOCK;
++
+ pid = __libc_fork();
++
++#warning hack alert
++ __MALLOC_UNLOCK;
++
+ if (pid == 0) {
+ __pthread_reset_main_thread();
++#warning need to reconsider __fresetlockfiles!
+ __fresetlockfiles();
+ pthread_call_handlers(child);
+ } else {
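The hunk above serializes fork() against the allocator by holding __malloc_lock across __libc_fork(), so a child forked while another thread is mid-allocation does not inherit a permanently locked heap. The same idea expressed through the portable pthread_atfork() interface (a sketch of the general pattern, not the uClibc-internal hook):

    #include <pthread.h>

    static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

    /* prepare: runs in the parent before fork(), taking the lock so no other
       thread holds it at the instant of the fork. */
    static void prepare(void) { pthread_mutex_lock(&alloc_lock); }
    /* parent/child: both sides release the lock once the fork completes. */
    static void parent(void)  { pthread_mutex_unlock(&alloc_lock); }
    static void child(void)   { pthread_mutex_unlock(&alloc_lock); }

    /* Call once at startup, e.g. from main(). */
    void install_fork_handlers(void)
    {
        pthread_atfork(prepare, parent, child);
    }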
+diff -urN -x .git uClibc-0.9.28/libc/sysdeps/linux/common/bits/uClibc_mutex.h uClibc-mjn3/libc/sysdeps/linux/common/bits/uClibc_mutex.h
+--- uClibc-0.9.28/libc/sysdeps/linux/common/bits/uClibc_mutex.h 1969-12-31 17:00:00.000000000 -0700
++++ uClibc-mjn3/libc/sysdeps/linux/common/bits/uClibc_mutex.h 2006-03-08 11:21:58.000000000 -0700
+@@ -0,0 +1,87 @@
++/* Copyright (C) 2006 Manuel Novoa III <mjn3@codepoet.org>
++ *
++ * GNU Library General Public License (LGPL) version 2 or later.
++ *
++ * Dedicated to Toni. See uClibc/DEDICATION.mjn3 for details.
++ */
++
++#ifndef _UCLIBC_MUTEX_H
++#define _UCLIBC_MUTEX_H
++
++#include <features.h>
++
++#ifdef __UCLIBC_HAS_THREADS__
++
++#include <pthread.h>
++
++#define __UCLIBC_MUTEX_TYPE pthread_mutex_t
++
++#define __UCLIBC_MUTEX(M) pthread_mutex_t M
++#define __UCLIBC_MUTEX_INIT(M,I) pthread_mutex_t M = I
++#define __UCLIBC_MUTEX_STATIC(M,I) static pthread_mutex_t M = I
++#define __UCLIBC_MUTEX_EXTERN(M) extern pthread_mutex_t M
++
++#define __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M) \
++ __pthread_mutex_lock(&(M))
++
++#define __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M) \
++ __pthread_mutex_unlock(&(M))
++
++#define __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M) \
++ __pthread_mutex_trylock(&(M))
++
++#define __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,C) \
++ do { \
++ struct _pthread_cleanup_buffer __infunc_pthread_cleanup_buffer; \
++ if (C) { \
++ _pthread_cleanup_push_defer(&__infunc_pthread_cleanup_buffer, \
++ (void (*) (void *))__pthread_mutex_unlock, \
++ &(M)); \
++ __pthread_mutex_lock(&(M)); \
++ } \
++ ((void)0)
++
++#define __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,C) \
++ if (C) { \
++ _pthread_cleanup_pop_restore(&__infunc_pthread_cleanup_buffer,1);\
++ } \
++ } while (0)
++
++#define __UCLIBC_MUTEX_AUTO_LOCK_VAR(A) int A
++
++#define __UCLIBC_MUTEX_AUTO_LOCK(M,A,V) \
++ __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,((A=(V)) == 0))
++
++#define __UCLIBC_MUTEX_AUTO_UNLOCK(M,A) \
++ __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,(A == 0))
++
++#define __UCLIBC_MUTEX_LOCK(M) \
++ __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
++
++#define __UCLIBC_MUTEX_UNLOCK(M) \
++ __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
++
++#else
++
++#define __UCLIBC_MUTEX(M) void *__UCLIBC_MUTEX_DUMMY_ ## M
++#define __UCLIBC_MUTEX_INIT(M,I) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
++#define __UCLIBC_MUTEX_STATIC(M,I) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
++#define __UCLIBC_MUTEX_EXTERN(M) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
++
++#define __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M) ((void)0)
++#define __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M) ((void)0)
++#define __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M) (0) /* Always succeed? */
++
++#define __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,C) ((void)0)
++#define __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,C) ((void)0)
++
++#define __UCLIBC_MUTEX_AUTO_LOCK_VAR(A) ((void)0)
++#define __UCLIBC_MUTEX_AUTO_LOCK(M,A,V) ((void)0)
++#define __UCLIBC_MUTEX_AUTO_UNLOCK(M,A) ((void)0)
++
++#define __UCLIBC_MUTEX_LOCK(M) ((void)0)
++#define __UCLIBC_MUTEX_UNLOCK(M) ((void)0)
++
++#endif
++
++#endif /* _UCLIBC_MUTEX_H */
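The LOCK/UNLOCK macros above expand to a do/while bracket around a deferred-cancellation cleanup handler, so both must appear in the same block and a thread cancelled inside the critical section still releases the mutex. The same shape written against the public pthread API (a standalone sketch of the pattern the macros generate; function and variable names are illustrative, not uClibc source):

    #define _GNU_SOURCE 1      /* for the *_np cleanup macros */
    #include <pthread.h>

    static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
    static int shared_counter;          /* illustrative shared state */

    static void unlock_cleanup(void *m)
    {
        pthread_mutex_unlock((pthread_mutex_t *)m);
    }

    int bump_counter(void)              /* hypothetical caller of the pattern */
    {
        int value;
        /* Analogue of __UCLIBC_MUTEX_LOCK(mylock): push the unlock handler
           with cancellation deferred, then take the lock. */
        pthread_cleanup_push_defer_np(unlock_cleanup, &mylock);
        pthread_mutex_lock(&mylock);

        value = ++shared_counter;       /* critical section */

        /* Analogue of __UCLIBC_MUTEX_UNLOCK(mylock): pop the handler,
           execute it, and restore the previous cancellation type. */
        pthread_cleanup_pop_restore_np(1);
        return value;
    }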
+diff -urN -x .git uClibc-0.9.28/libc/sysdeps/linux/mips/pipe.c uClibc-mjn3/libc/sysdeps/linux/mips/pipe.c
+--- uClibc-0.9.28/libc/sysdeps/linux/mips/pipe.c 2005-08-17 16:49:44.000000000 -0600
++++ uClibc-mjn3/libc/sysdeps/linux/mips/pipe.c 1969-12-31 17:00:00.000000000 -0700
+@@ -1,23 +0,0 @@
+-/* pipe system call for Linux/MIPS */
+-
+-/*see uClibc's sh/pipe.c and glibc-2.2.4's mips/pipe.S */
+-
+-#include <errno.h>
+-#include <unistd.h>
+-#include <syscall.h>
+-
+-int pipe(int *fd)
+-{
+- register long int res __asm__ ("$2"); // v0
+- register long int res2 __asm__ ("$3"); // v1
+-
+- asm ("move\t$4,%2\n\t" // $4 = a0
+- "syscall" /* Perform the system call. */
+- : "=r" (res)
+- : "0" (__NR_pipe), "r" (fd)
+- : "$4", "$7");
+-
+- fd[0] = res;
+- fd[1] = res2;
+- return(0);
+-}