diff --git a/.gitignore b/.gitignore index 0f2f3ddeec1..b75c5d48905 100644 --- a/.gitignore +++ b/.gitignore @@ -81,6 +81,9 @@ perl/Makefile.config /tests/common.sh /tests/dummy /tests/result* +/tests/restricted-innocent +/tests/shell +/tests/shell.drv # /tests/lang/ /tests/lang/*.out diff --git a/Makefile b/Makefile index 834f84b286b..45a3338ed21 100644 --- a/Makefile +++ b/Makefile @@ -5,17 +5,7 @@ makefiles = \ src/libmain/local.mk \ src/libexpr/local.mk \ src/nix/local.mk \ - src/nix-store/local.mk \ - src/nix-instantiate/local.mk \ - src/nix-env/local.mk \ - src/nix-daemon/local.mk \ - src/nix-collect-garbage/local.mk \ - src/nix-copy-closure/local.mk \ - src/nix-prefetch-url/local.mk \ src/resolve-system-dependencies/local.mk \ - src/nix-channel/local.mk \ - src/nix-build/local.mk \ - src/build-remote/local.mk \ scripts/local.mk \ corepkgs/local.mk \ misc/systemd/local.mk \ diff --git a/Makefile.config.in b/Makefile.config.in index a9785dc7395..59730b64638 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -1,4 +1,6 @@ +AR = @AR@ BDW_GC_LIBS = @BDW_GC_LIBS@ +BUILD_SHARED_LIBS = @BUILD_SHARED_LIBS@ CC = @CC@ CFLAGS = @CFLAGS@ CXX = @CXX@ @@ -6,7 +8,6 @@ CXXFLAGS = @CXXFLAGS@ ENABLE_S3 = @ENABLE_S3@ HAVE_SODIUM = @HAVE_SODIUM@ HAVE_READLINE = @HAVE_READLINE@ -HAVE_BROTLI = @HAVE_BROTLI@ HAVE_SECCOMP = @HAVE_SECCOMP@ LIBCURL_LIBS = @LIBCURL_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@ @@ -16,9 +17,9 @@ SODIUM_LIBS = @SODIUM_LIBS@ LIBLZMA_LIBS = @LIBLZMA_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ +EDITLINE_LIBS = @EDITLINE_LIBS@ bash = @bash@ bindir = @bindir@ -brotli = @brotli@ lsof = @lsof@ datadir = @datadir@ datarootdir = @datarootdir@ diff --git a/config/config.guess b/config/config.guess index 137bedf2e28..d4fb3213ec7 100755 --- a/config/config.guess +++ b/config/config.guess @@ -1,14 +1,12 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2012-08-14' +timestamp='2018-08-02' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -17,24 +15,22 @@ timestamp='2012-08-14' # General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - - -# Originally written by Per Bothner. Please send patches (context -# diff format) to and include a ChangeLog -# entry. +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). # -# This script attempts to guess a canonical system name similar to -# config.sub. 
If it succeeds, it prints the system name on stdout, and -# exits with 0. Otherwise, it exits with 1. +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + me=`echo "$0" | sed -e 's,.*/,,'` @@ -43,7 +39,7 @@ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -54,9 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. +Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -90,8 +84,6 @@ if test $# != 0; then exit 1 fi -trap 'exit 1' 1 2 15 - # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a @@ -102,34 +94,39 @@ trap 'exit 1' 1 2 15 # Portable tmp directory creation inspired by the Autoconf team. -set_cc_for_build=' -trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; -trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; -: ${TMPDIR=/tmp} ; - { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || - { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || - { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || - { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; -dummy=$tmp/dummy ; -tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; -case $CC_FOR_BUILD,$HOST_CC,$CC in - ,,) echo "int x;" > $dummy.c ; - for c in cc gcc c89 c99 ; do - if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then - CC_FOR_BUILD="$c"; break ; - fi ; - done ; - if test x"$CC_FOR_BUILD" = x ; then - CC_FOR_BUILD=no_compiler_found ; - fi - ;; - ,,*) CC_FOR_BUILD=$CC ;; - ,*,*) CC_FOR_BUILD=$HOST_CC ;; -esac ; set_cc_for_build= ;' +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 1 2 13 15 +trap 'exitcode=$?; test -z "$tmp" || rm -fr "$tmp"; exit $exitcode' 0 + +set_cc_for_build() { + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039 + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$driver" + break + fi + done + if 
test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) -if (test -f /.attbin/uname) >/dev/null 2>&1 ; then +if test -f /.attbin/uname ; then PATH=$PATH:/.attbin ; export PATH fi @@ -138,9 +135,37 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + set_cc_for_build + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + + # If ldd exists, use it to detect musl libc. + if command -v ldd >/dev/null && \ + ldd --version 2>&1 | grep -q ^musl + then + LIBC=musl + fi + ;; +esac + # Note: order is significant - the case branches are not exclusive. -case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -153,21 +178,31 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" - UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ - /usr/sbin/$sysctl 2>/dev/null || echo unknown)` - case "${UNAME_MACHINE_ARCH}" in + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)` + case "$UNAME_MACHINE_ARCH" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; - *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + earmv*) + arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched - # to ELF recently, or will in the future. - case "${UNAME_MACHINE_ARCH}" in + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) - eval $set_cc_for_build + set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then @@ -182,44 +217,67 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in os=netbsd ;; esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` + ;; + esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. 
- case "${UNAME_VERSION}" in + case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) - release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + release=`echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. - echo "${machine}-${os}${release}" + echo "$machine-${os}${release}${abi-}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) - echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) - echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd${UNAME_RELEASE} + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) - echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) @@ -236,63 +294,54 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV4.5 (21064)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") - UNAME_MACHINE="alpha" ;; + UNAME_MACHINE=alpha ;; "EV5 (21164)") - UNAME_MACHINE="alphaev5" ;; + UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") - UNAME_MACHINE="alphaev56" ;; + UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") - UNAME_MACHINE="alphapca56" ;; + UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") - UNAME_MACHINE="alphapca57" ;; + UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") - UNAME_MACHINE="alphaev6" ;; + UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") - UNAME_MACHINE="alphaev67" ;; + UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") - UNAME_MACHINE="alphaev68" ;; + UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") - UNAME_MACHINE="alphaev69" ;; + UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") - UNAME_MACHINE="alphaev7" ;; + UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") - UNAME_MACHINE="alphaev79" ;; + UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. 
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; - Alpha\ *:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # Should we change UNAME_MACHINE based on the output of uname instead - # of the specific Alpha model? - echo alpha-pc-interix - exit ;; - 21064:Windows_NT:50:3) - echo alpha-dec-winnt3.5 - exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-amigaos + echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) - echo ${UNAME_MACHINE}-unknown-morphos + echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition @@ -304,9 +353,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix${UNAME_RELEASE} + echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; - arm:riscos:*:*|arm:RISCOS:*:*) + arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) @@ -331,38 +380,33 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) - echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux${UNAME_RELEASE} + echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) - eval $set_cc_for_build - SUN_ARCH="i386" - # If there is a compiler, see if it is configured for 64-bit objects. - # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. - # This test works for both compilers. - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - SUN_ARCH="x86_64" - fi - fi - echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + UNAME_REL="`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" + case `isainfo -b` in + 32) + echo i386-pc-solaris2"$UNAME_REL" + ;; + 64) + echo x86_64-pc-solaris2"$UNAME_REL" + ;; + esac exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. 
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in @@ -371,25 +415,25 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" exit ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` - test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) - echo m68k-sun-sunos${UNAME_RELEASE} + echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) - echo sparc-sun-sunos${UNAME_RELEASE} + echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos${UNAME_RELEASE} + echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not @@ -400,44 +444,44 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint${UNAME_RELEASE} + echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint${UNAME_RELEASE} + echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint${UNAME_RELEASE} + echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint${UNAME_RELEASE} + echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) - echo m68k-apple-machten${UNAME_RELEASE} + echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) - echo powerpc-apple-machten${UNAME_RELEASE} + echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix${UNAME_RELEASE} + echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix${UNAME_RELEASE} + echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix${UNAME_RELEASE} + echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { @@ -446,23 +490,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) - printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) - printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + printf ("mips-mips-riscos%ssvr4\\n", 
argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) - printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && - dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && - SYSTEM_NAME=`$dummy $dummyarg` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos${UNAME_RELEASE} + echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax @@ -488,17 +532,17 @@ EOF AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] then - if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ - [ ${TARGET_BINARY_INTERFACE}x = x ] + if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ + [ "$TARGET_BINARY_INTERFACE"x = x ] then - echo m88k-dg-dgux${UNAME_RELEASE} + echo m88k-dg-dgux"$UNAME_RELEASE" else - echo m88k-dg-dguxbcs${UNAME_RELEASE} + echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else - echo i586-dg-dgux${UNAME_RELEASE} + echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) @@ -515,7 +559,7 @@ EOF echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) - echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id @@ -527,14 +571,14 @@ EOF if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #include main() @@ -545,7 +589,7 @@ EOF exit(0); } EOF - if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then echo "$SYSTEM_NAME" else @@ -559,26 +603,27 @@ EOF exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` - if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi - if [ -x /usr/bin/oslevel ] ; then - IBM_REV=`/usr/bin/oslevel` + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else - IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi - echo ${IBM_ARCH}-ibm-aix${IBM_REV} + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; - ibmrt:4.4BSD:*|romp-ibm:BSD:*) + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx @@ -593,28 +638,28 @@ EOF echo 
m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - case "${UNAME_MACHINE}" in - 9000/31? ) HP_ARCH=m68000 ;; - 9000/[34]?? ) HP_ARCH=m68k ;; + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + case "$UNAME_MACHINE" in + 9000/31?) HP_ARCH=m68000 ;; + 9000/[34]??) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "${sc_cpu_version}" in - 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 - 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + case "$sc_cpu_version" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "${sc_kernel_bits}" in - 32) HP_ARCH="hppa2.0n" ;; - 64) HP_ARCH="hppa2.0w" ;; - '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + case "$sc_kernel_bits" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "${HP_ARCH}" = "" ]; then - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + if [ "$HP_ARCH" = "" ]; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include @@ -647,13 +692,13 @@ EOF exit (0); } EOF - (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=`"$dummy"` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ ${HP_ARCH} = "hppa2.0w" ] + if [ "$HP_ARCH" = hppa2.0w ] then - eval $set_cc_for_build + set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler @@ -664,23 +709,23 @@ EOF # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 - if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then - HP_ARCH="hppa2.0w" + HP_ARCH=hppa2.0w else - HP_ARCH="hppa64" + HP_ARCH=hppa64 fi fi - echo ${HP_ARCH}-hp-hpux${HPUX_REV} + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) - HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux${HPUX_REV} + HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #include int main () @@ -705,11 +750,11 @@ EOF exit (0); } EOF - $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; - 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) @@ -718,7 +763,7 @@ EOF *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; - hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) @@ -726,9 +771,9 @@ EOF exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then - echo ${UNAME_MACHINE}-unknown-osf1mk + echo "$UNAME_MACHINE"-unknown-osf1mk else - echo ${UNAME_MACHINE}-unknown-osf1 + echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) @@ -753,127 +798,120 @@ EOF echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo 
ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) - echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) - echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) - FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) - FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` - FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi${UNAME_RELEASE} + echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) - echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + arm*:FreeBSD:*:*) + UNAME_PROCESSOR=`uname -p` + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi + else + echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf + fi exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` - case ${UNAME_PROCESSOR} in + case "$UNAME_PROCESSOR" in amd64) - echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; - *) - echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; i*:CYGWIN*:*) - echo ${UNAME_MACHINE}-pc-cygwin + echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) - echo ${UNAME_MACHINE}-pc-mingw64 + echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) - echo 
${UNAME_MACHINE}-pc-mingw32 + echo "$UNAME_MACHINE"-pc-mingw32 exit ;; - i*:MSYS*:*) - echo ${UNAME_MACHINE}-pc-msys - exit ;; - i*:windows32*:*) - # uname -m includes "-pc" on this system. - echo ${UNAME_MACHINE}-mingw32 + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) - echo ${UNAME_MACHINE}-pc-pw32 + echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) - case ${UNAME_MACHINE} in + case "$UNAME_MACHINE" in x86) - echo i586-pc-interix${UNAME_RELEASE} + echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix${UNAME_RELEASE} + echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) - echo ia64-unknown-interix${UNAME_RELEASE} + echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; - [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) - echo i${UNAME_MACHINE}-pc-mks - exit ;; - 8664:Windows_NT:*) - echo x86_64-pc-mks - exit ;; - i*:Windows_NT*:* | Pentium*:Windows_NT*:*) - # How do we know it's Interix rather than the generic POSIX subsystem? - # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we - # UNAME_MACHINE based on the output of uname instead of i386? - echo i586-pc-interix - exit ;; i*:UWIN*:*) - echo ${UNAME_MACHINE}-pc-uwin + echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; - p*:CYGWIN*:*) - echo powerpcle-unknown-cygwin - exit ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" exit ;; *:GNU:*:*) # the GNU system - echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" exit ;; - i*86:Minix:*:*) - echo ${UNAME_MACHINE}-pc-minix + *:Minix:*:*) + echo "$UNAME_MACHINE"-unknown-minix exit ;; aarch64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in @@ -886,63 +924,64 @@ EOF EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 - if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi - echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) - eval $set_cc_for_build + set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo ${UNAME_MACHINE}-unknown-linux-gnueabi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else - echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) - echo ${UNAME_MACHINE}-axis-linux-gnu + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) - LIBC=gnu - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c - #ifdef __dietlibc__ - LIBC=dietlibc - #endif -EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` - echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) - eval $set_cc_for_build - sed 's/^ //' << EOF >$dummy.c + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el @@ -956,55 +995,70 @@ EOF #endif #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` - test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU'`" + test "x$CPU" != x && { echo "$CPU-unknown-linux-$LIBC"; exit; } ;; - or32:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) - echo sparc-unknown-linux-gnu + echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-gnu + echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in - PA7*) echo hppa1.1-unknown-linux-gnu ;; - PA8*) echo hppa2.0-unknown-linux-gnu ;; - *) echo hppa-unknown-linux-gnu ;; + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-gnu + echo powerpc64-unknown-linux-"$LIBC" exit ;; 
ppc:Linux:*:*) - echo powerpc-unknown-linux-gnu + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) - echo ${UNAME_MACHINE}-ibm-linux + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) - echo ${UNAME_MACHINE}-dec-linux-gnu + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; xtensa*:Linux:*:*) - echo ${UNAME_MACHINE}-unknown-linux-gnu + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. @@ -1018,34 +1072,34 @@ EOF # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. - echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. - echo ${UNAME_MACHINE}-pc-os2-emx + echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) - echo ${UNAME_MACHINE}-unknown-stop + echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) - echo ${UNAME_MACHINE}-unknown-atheos + echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) - echo ${UNAME_MACHINE}-pc-syllable + echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos${UNAME_RELEASE} + echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) - echo ${UNAME_MACHINE}-pc-msdosdjgpp + echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; - i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) - UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + i*86:*:4.*:*) + UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else - echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) @@ -1055,12 +1109,12 @@ EOF *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}{$UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 @@ -1070,9 +1124,9 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else - echo 
${UNAME_MACHINE}-pc-sysv32 + echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) @@ -1080,7 +1134,7 @@ EOF # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub - # prints for the "djgpp" host, or else GDB configury will decide that + # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; @@ -1092,9 +1146,9 @@ EOF exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. - echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) @@ -1114,9 +1168,9 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; @@ -1125,28 +1179,28 @@ EOF test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ - && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ - && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos${UNAME_RELEASE} + echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos${UNAME_RELEASE} + echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos${UNAME_RELEASE} + echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos${UNAME_RELEASE} + echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv${UNAME_RELEASE} + echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 @@ -1157,7 +1211,7 @@ EOF *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo ${UNAME_MACHINE}-sni-sysv4 + echo "$UNAME_MACHINE"-sni-sysv4 else echo ns32k-sni-sysv fi @@ -1177,23 +1231,23 @@ EOF exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo ${UNAME_MACHINE}-stratus-vos + echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. 
echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) - echo m68k-apple-aux${UNAME_RELEASE} + echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then - echo mips-nec-sysv${UNAME_RELEASE} + echo mips-nec-sysv"$UNAME_RELEASE" else - echo mips-unknown-sysv${UNAME_RELEASE} + echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. @@ -1212,65 +1266,93 @@ EOF echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux${UNAME_RELEASE} + echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux${UNAME_RELEASE} + echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux${UNAME_RELEASE} + echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux${UNAME_RELEASE} + echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux${UNAME_RELEASE} + echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux${UNAME_RELEASE} + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody${UNAME_RELEASE} + echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) - echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown - case $UNAME_PROCESSOR in - i386) - eval $set_cc_for_build - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - UNAME_PROCESSOR="x86_64" - fi - fi ;; - unknown) UNAME_PROCESSOR=powerpc ;; - esac - echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test "`echo "$UNAME_RELEASE" | sed -e 's/\..*//'`" -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. 
+ UNAME_PROCESSOR=x86_64 + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` - if test "$UNAME_PROCESSOR" = "x86"; then + if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; - NEO-?:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk${UNAME_RELEASE} + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk${UNAME_RELEASE} + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; - NSR-?:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk${UNAME_RELEASE} + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux @@ -1279,18 +1361,19 @@ EOF echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) - echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. - if test "$cputype" = "386"; then + # shellcheck disable=SC2154 + if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi - echo ${UNAME_MACHINE}-unknown-plan9 + echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 @@ -1311,14 +1394,14 @@ EOF echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) - echo mips-sei-seiux${UNAME_RELEASE} + echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) - echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "${UNAME_MACHINE}" in + case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; @@ -1327,182 +1410,48 @@ EOF echo i386-pc-xenix exit ;; i*86:skyos:*:*) - echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" exit ;; i*86:rdos:*:*) - echo ${UNAME_MACHINE}-pc-rdos + echo "$UNAME_MACHINE"-pc-rdos exit ;; i*86:AROS:*:*) - echo ${UNAME_MACHINE}-pc-aros + echo "$UNAME_MACHINE"-pc-aros exit ;; x86_64:VMkernel:*:*) - echo ${UNAME_MACHINE}-unknown-esx + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs exit ;; esac -eval $set_cc_for_build -cat >$dummy.c < -# include -#endif -main () -{ -#if defined (sony) -#if defined (MIPSEB) - /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, - I don't know.... 
*/ - printf ("mips-sony-bsd\n"); exit (0); -#else -#include - printf ("m68k-sony-newsos%s\n", -#ifdef NEWSOS4 - "4" -#else - "" -#endif - ); exit (0); -#endif -#endif - -#if defined (__arm) && defined (__acorn) && defined (__unix) - printf ("arm-acorn-riscix\n"); exit (0); -#endif +echo "$0: unable to guess system type" >&2 -#if defined (hp300) && !defined (hpux) - printf ("m68k-hp-bsd\n"); exit (0); -#endif +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 </dev/null`; - if (version < 4) - printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); - else - printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); - exit (0); -#endif - -#if defined (MULTIMAX) || defined (n16) -#if defined (UMAXV) - printf ("ns32k-encore-sysv\n"); exit (0); -#else -#if defined (CMU) - printf ("ns32k-encore-mach\n"); exit (0); -#else - printf ("ns32k-encore-bsd\n"); exit (0); -#endif -#endif -#endif - -#if defined (__386BSD__) - printf ("i386-pc-bsd\n"); exit (0); -#endif - -#if defined (sequent) -#if defined (i386) - printf ("i386-sequent-dynix\n"); exit (0); -#endif -#if defined (ns32000) - printf ("ns32k-sequent-dynix\n"); exit (0); -#endif -#endif - -#if defined (_SEQUENT_) - struct utsname un; - - uname(&un); - - if (strncmp(un.version, "V2", 2) == 0) { - printf ("i386-sequent-ptx2\n"); exit (0); - } - if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ - printf ("i386-sequent-ptx1\n"); exit (0); - } - printf ("i386-sequent-ptx\n"); exit (0); - -#endif - -#if defined (vax) -# if !defined (ultrix) -# include -# if defined (BSD) -# if BSD == 43 - printf ("vax-dec-bsd4.3\n"); exit (0); -# else -# if BSD == 199006 - printf ("vax-dec-bsd4.3reno\n"); exit (0); -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# endif -# else - printf ("vax-dec-bsd\n"); exit (0); -# endif -# else - printf ("vax-dec-ultrix\n"); exit (0); -# endif -#endif - -#if defined (alliant) && defined (i860) - printf ("i860-alliant-bsd\n"); exit (0); -#endif - - exit (1); -} +NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize +the system type. Please install a C compiler and try again. EOF - -$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && - { echo "$SYSTEM_NAME"; exit; } - -# Apollos put the system type in the environment. - -test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } - -# Convex versions that predate uname can use getsysinfo(1) - -if [ -x /usr/convex/getsysinfo ] -then - case `getsysinfo -f cpu_type` in - c1*) - echo c1-convex-bsd - exit ;; - c2*) - if getsysinfo -f scalar_acc - then echo c32-convex-bsd - else echo c2-convex-bsd - fi - exit ;; - c34*) - echo c34-convex-bsd - exit ;; - c38*) - echo c38-convex-bsd - exit ;; - c4*) - echo c4-convex-bsd - exit ;; - esac -fi + ;; +esac cat >&2 < in order to provide the needed -information to handle your system. +If $0 has already been updated, send the following data and any +information you think might be pertinent to config-patches@gnu.org to +provide the necessary information to handle your system. 
config.guess timestamp = $timestamp @@ -1521,16 +1470,16 @@ hostinfo = `(hostinfo) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` -UNAME_MACHINE = ${UNAME_MACHINE} -UNAME_RELEASE = ${UNAME_RELEASE} -UNAME_SYSTEM = ${UNAME_SYSTEM} -UNAME_VERSION = ${UNAME_VERSION} +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" EOF exit 1 # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/config/config.sub b/config/config.sub index bdda9e4a32c..c19e671805a 100755 --- a/config/config.sub +++ b/config/config.sub @@ -1,36 +1,31 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, -# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -# 2011, 2012 Free Software Foundation, Inc. +# Copyright 1992-2018 Free Software Foundation, Inc. -timestamp='2012-08-18' +timestamp='2018-08-13' -# This file is (in principle) common to ALL GNU software. -# The presence of a machine in this file suggests that SOME GNU software -# can handle that machine. It does not imply ALL GNU software can. -# -# This file is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, see . +# along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches to . Submit a context -# diff and a properly formatted GNU ChangeLog entry. +# Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -38,7 +33,7 @@ timestamp='2012-08-18' # Otherwise, we print the canonical config type on stdout and succeed. 
# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD +# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases @@ -58,12 +53,11 @@ timestamp='2012-08-18' me=`echo "$0" | sed -e 's,.*/,,'` usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS - $0 [OPTION] ALIAS +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. -Operation modes: +Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit @@ -73,9 +67,7 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, -2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 -Free Software Foundation, Inc. +Copyright 1992-2018 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -102,7 +94,7 @@ while test $# -gt 0 ; do *local*) # First pass through any local machine types. - echo $1 + echo "$1" exit ;; * ) @@ -118,162 +110,596 @@ case $# in exit 1;; esac -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | \ - kopensolaris*-gnu* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac +# Split fields of configuration type +IFS="-" read -r field1 field2 field3 field4 <&2 + exit 1 ;; - -wrs) - os=-vxworks - basic_machine=$1 + *-*-*-*) + basic_machine=$field1-$field2 + os=$field3-$field4 ;; - -chorusos*) - os=-chorusos - basic_machine=$1 + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ + | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + os=linux-android + ;; + *) + basic_machine=$field1-$field2 + os=$field3 + ;; + esac ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc532* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* 
| 3100* | hitachi* \ + | c[123]* | convex* | sun | crds | omron* | dg | ultra | tti* \ + | harris | dolphin | highlevel | gould | cbm | ns | masscomp \ + | apple | axis | knuth | cray | microblaze* \ + | sim | cisco | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + os= + ;; + *) + basic_machine=$field1 + os=$field2 + ;; + esac ;; - -hiux*) - os=-hiuxwe2 + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. + case $field1 in + 386bsd) + basic_machine=i386-pc + os=bsd + ;; + a29khif) + basic_machine=a29k-amd + os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + os=scout + ;; + alliant) + basic_machine=fx80-alliant + os= + ;; + altos | altos3068) + basic_machine=m68k-altos + os= + ;; + am29k) + basic_machine=a29k-none + os=bsd + ;; + amdahl) + basic_machine=580-amdahl + os=sysv + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=bsd + ;; + aros) + basic_machine=i386-pc + os=aros + ;; + aux) + basic_machine=m68k-apple + os=aux + ;; + balance) + basic_machine=ns32k-sequent + os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=linux + ;; + cegcc) + basic_machine=arm-unknown + os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=bsd + ;; + convex-c2) + basic_machine=c2-convex + os=bsd + ;; + convex-c32) + basic_machine=c32-convex + os=bsd + ;; + convex-c34) + basic_machine=c34-convex + os=bsd + ;; + convex-c38) + basic_machine=c38-convex + os=bsd + ;; + cray) + basic_machine=j90-cray + os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + os= + ;; + delta88) + basic_machine=m88k-motorola + os=sysv3 + ;; + dicos) + basic_machine=i686-pc + os=dicos + ;; + djgpp) + basic_machine=i586-pc + os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=ose + ;; + gmicro) + basic_machine=tron-gmicro + os=sysv + ;; + go32) + basic_machine=i386-pc + os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=hms + ;; + harris) + basic_machine=m88k-harris + os=sysv3 + ;; + hp300bsd) + basic_machine=m68k-hp + os=bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=hpux + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=proelf + ;; + i386mach) + basic_machine=i386-mach + os=mach + ;; + vsta) + basic_machine=i386-pc + os=vsta + ;; + isi68 | isi) + basic_machine=m68k-isi + os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + os=sysv + ;; + merlin) + basic_machine=ns32k-utek + os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + os=coff + ;; + morphos) + basic_machine=powerpc-unknown + os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=moxiebox + ;; + msdos) + basic_machine=i386-pc + os=msdos + ;; + msys) + basic_machine=i686-pc + os=msys + ;; + mvs) + basic_machine=i370-ibm + os=mvs + ;; + nacl) + basic_machine=le32-unknown + os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + os=netbsd + ;; + netwinder) + 
basic_machine=armv4l-rebel + os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=newsos + ;; + news1000) + basic_machine=m68030-sony + os=newsos + ;; + necv70) + basic_machine=v70-nec + os=sysv + ;; + nh3000) + basic_machine=m68k-harris + os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=cxux + ;; + nindy960) + basic_machine=i960-intel + os=nindy + ;; + mon960) + basic_machine=i960-intel + os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=ose + ;; + os68k) + basic_machine=m68k-none + os=os68k + ;; + paragon) + basic_machine=i860-intel + os=osf + ;; + parisc) + basic_machine=hppa-unknown + os=linux + ;; + pw32) + basic_machine=i586-unknown + os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=rdos + ;; + rdos32) + basic_machine=i386-pc + os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=coff + ;; + sa29200) + basic_machine=a29k-amd + os=udi + ;; + sei) + basic_machine=mips-sei + os=seiux + ;; + sps7) + basic_machine=m68k-bull + os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + os= + ;; + stratus) + basic_machine=i860-stratus + os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + os= + ;; + sun2os3) + basic_machine=m68000-sun + os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + os= + ;; + sun3os3) + basic_machine=m68k-sun + os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + os= + ;; + sun4os3) + basic_machine=sparc-sun + os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + os= + ;; + sv1) + basic_machine=sv1-cray + os=unicos + ;; + symmetry) + basic_machine=i386-sequent + os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=unicos + ;; + t90) + basic_machine=t90-cray + os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + os=tpf + ;; + udi29k) + basic_machine=a29k-amd + os=udi + ;; + ultra3) + basic_machine=a29k-nyu + os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=none + ;; + vaxv) + basic_machine=vax-dec + os=sysv + ;; + vms) + basic_machine=vax-dec + os=vms + ;; + vxworks960) + basic_machine=i960-wrs + os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=vxworks + ;; + xbox) + basic_machine=i686-pc + os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + os=unicos + ;; + *) + basic_machine=$1 + os= + ;; + esac ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. + craynv) + basic_machine=craynv-cray + os=${os:-unicosmp} ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + fx80) + basic_machine=fx80-alliant ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + w89k) + basic_machine=hppa1.1-winbond ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + op50n) + basic_machine=hppa1.1-oki ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. 
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + op60c) + basic_machine=hppa1.1-oki ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + romp) + basic_machine=romp-ibm ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + mmix) + basic_machine=mmix-knuth ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + rs6000) + basic_machine=rs6000-ibm ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + vax) + basic_machine=vax-dec ;; - -clix*) - basic_machine=clipper-intergraph + pdp11) + basic_machine=pdp11-dec ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + we32k) + basic_machine=we32k-att ;; - -lynx*178) - os=-lynxos178 + cydra) + basic_machine=cydra-cydrome ;; - -lynx*5) - os=-lynxos5 + i370-ibm* | ibm*) + basic_machine=i370-ibm ;; - -lynx*) - os=-lynxos + orion) + basic_machine=orion-highlevel ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + orion105) + basic_machine=clipper-highlevel ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` + mac | mpw | mac-mpw) + basic_machine=m68k-apple ;; - -psos*) - os=-psos + pmac | pmac-mpw) + basic_machine=powerpc-apple ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint + xps | xps100) + basic_machine=xps100-honeywell ;; -esac -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ + | abacus \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ - | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ - | be32 | be64 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv6m | armv[78][arm] \ + | avr | avr32 \ + | asmjs \ + | ba \ + | be32 | be64 \ | bfin \ - | c4x | clipper \ + | c4x | c8051 | clipper | csky \ | d10v | d30v | dlx | dsp16xx \ - | epiphany \ - | fido | fr30 | frv \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ - | i370 | i860 | i960 | ia64 \ + | i370 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ + | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | mcore | mep | metag \ + | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ @@ -287,26 +713,31 @@ case $basic_machine in | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ - | nios | nios2 \ + | nfp \ + | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ - | open8 \ - | or32 \ - | pdp10 | pdp11 | pj | pjl \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ | pyramid \ + | riscv | riscv32 | riscv64 \ | rl78 | rx 
\ | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh[23]ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ @@ -314,8 +745,9 @@ case $basic_machine in | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ + | visium \ + | wasm32 \ + | x86 | xc16x | xstormy16 | xgate | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; @@ -328,23 +760,23 @@ case $basic_machine in c6x) basic_machine=tic6x-unknown ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) - basic_machine=$basic_machine-unknown - os=-none + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65) + ;; + m9s12z | m68hcs12z | hcs12z | s12z) + basic_machine=s12z-unknown ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + m9s12z-* | m68hcs12z-* | hcs12z-* | s12z-*) + basic_machine=s12z-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ms1) basic_machine=mt-unknown ;; - strongarm | thumb | xscale) basic_machine=arm-unknown ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; xscaleeb) basic_machine=armeb-unknown ;; @@ -359,37 +791,40 @@ case $basic_machine in i*86 | x86_64) basic_machine=$basic_machine-pc ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; # Recognize the basic CPU types with company name. 
- 580-* \ + 1750a-* | 580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ + | abacus-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | alphapca5[67]-* | alpha64pca5[67]-* \ + | am33_2.0-* \ + | arc-* | arceb-* \ + | arm-* | arm[lb]e-* | arme[lb]-* | armv*-* \ | avr-* | avr32-* \ + | asmjs-* \ + | ba-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | c8051-* | clipper-* | craynv-* | csky-* | cydra-* \ + | d10v-* | d30v-* | dlx-* | dsp16xx-* \ + | e2k-* | elxsi-* | epiphany-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | ft32-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ - | i*86-* | i860-* | i960-* | ia64-* \ + | i370-* | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ | ip2k-* | iq2000-* \ + | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | m5200-* | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* | v70-* | w65-* \ + | m6811-* | m68hc11-* | m6812-* | m68hc12-* | m68hcs12x-* | nvptx-* | picochip-* \ + | m88110-* | m88k-* | maxq-* | mb-* | mcore-* | mep-* | metag-* \ + | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ @@ -403,37 +838,50 @@ case $basic_machine in | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ + | mn10200-* | mn10300-* \ + | moxie-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* \ + | nfp-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ + | or1k*-* \ + | or32-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ | pyramid-* \ + | riscv-* | riscv32-* | riscv64-* \ | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | score-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]ae[lb]-* | sh[23]e-* | she[lb]-* | sh[lb]e-* \ + | sh[1234]e[lb]-* | sh[12345][lb]e-* | sh[23]ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | spu-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ + | visium-* \ + | wasm32-* \ | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ + | x86-* | x86_64-* | xc16x-* | xgate-* | 
xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) @@ -444,141 +892,45 @@ case $basic_machine in ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. - 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; amd64) basic_machine=x86_64-pc ;; amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv + basic_machine=x86_64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; amiga | amiga-*) basic_machine=m68k-unknown ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + basic_machine=bfin-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; bluegene*) basic_machine=powerpc-ibm - os=-cnk + os=cnk ;; c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic54x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic55x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=tic6x-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; - cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp + os=${os:-unicos} ;; cr16 | cr16-*) basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds + os=${os:-elf} ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis @@ -588,7 +940,7 @@ case $basic_machine in ;; crx) basic_machine=crx-unknown - os=-elf + os=${os:-elf} ;; da30 | da30-*) basic_machine=m68k-da30 @@ -598,50 +950,38 @@ case $basic_machine in ;; decsystem10* | dec10*) basic_machine=pdp10-dec - os=-tops10 + os=tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec - os=-tops20 + os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; dpx20 | dpx20-*) basic_machine=rs6000-bull - os=-bosx + os=${os:-bosx} ;; - dpx2* | dpx2*-bull) + dpx2*) basic_machine=m68k-bull - os=-sysv3 
+ os=sysv3 ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd + e500v[12]-*) + basic_machine=powerpc-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=$os"spe" ;; encore | umax | mmax) basic_machine=ns32k-encore ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose + elxsi) + basic_machine=elxsi-elxsi + os=${os:-bsd} ;; fx2800) basic_machine=i860-alliant @@ -649,45 +989,13 @@ case $basic_machine in genix) basic_machine=ns32k-ns ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 + os=hiuxwe2 ;; hp300-*) basic_machine=m68k-hp ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; @@ -717,193 +1025,79 @@ case $basic_machine in hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv32 ;; i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv4 ;; i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=sysv ;; i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach + basic_machine=`echo "$1" | sed -e 's/86.*/86-pc/'` + os=solaris2 ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta + j90 | j90-cray) + basic_machine=j90-cray + os=${os:-unicos} ;; iris | iris4d) basic_machine=mips-sgi case $os in - -irix*) + irix*) ;; *) - os=-irix4 + os=irix4 ;; esac ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo "$basic_machine" | sed 's/-.*//'` ;; m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv + basic_machine=m68k-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; - merlin) - basic_machine=ns32k-utek - os=-sysv - ;; - microblaze) + microblaze*) basic_machine=microblaze-xilinx ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i386-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; miniframe) basic_machine=m68000-convergent ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari - os=-mint + os=mint ;; mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'` ;; mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - 
;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - msdos) - basic_machine=i386-pc - os=-msdos + basic_machine=`echo "$basic_machine" | sed -e 's/mips3/mips64/'`-unknown ;; ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i386-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos + basic_machine=`echo "$basic_machine" | sed -e 's/ms1-/mt-/'` ;; news-3600 | risc-news) basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv + os=newsos ;; - next | m*-next ) + next | m*-next) basic_machine=m68k-next case $os in - -nextstep* ) + nextstep* ) ;; - -ns2*) - os=-nextstep2 + ns2*) + os=nextstep2 ;; *) - os=-nextstep3 + os=nextstep3 ;; esac ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; np1) basic_machine=np1-gould ;; @@ -916,40 +1110,26 @@ case $basic_machine in nsr-tandem) basic_machine=nsr-tandem ;; + nsv-tandem) + basic_machine=nsv-tandem + ;; + nsx-tandem) + basic_machine=nsx-tandem + ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki - os=-proelf + os=proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; pa-hitachi) basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux + os=hiuxwe2 ;; parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux + basic_machine=hppa-`echo "$basic_machine" | sed 's/^[^-]*-//'` + os=linux ;; pbd) basic_machine=sparc-tti @@ -964,7 +1144,7 @@ case $basic_machine in basic_machine=i386-pc ;; pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i386-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc @@ -979,16 +1159,16 @@ case $basic_machine in basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i586-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i686-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=i786-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould @@ -998,39 +1178,27 @@ case $basic_machine in ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc-`echo "$basic_machine" | 
sed 's/^[^-]*-//'` ;; - ppcle | powerpclittle | ppc-le | powerpc-little) + ppcle | powerpclittle) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpcle-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ppc64-*) basic_machine=powerpc64-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; - ppc64le | powerpc64little | ppc64-le | powerpc64-little) + ppc64le | powerpc64little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + basic_machine=powerpc64le-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; rm[46]00) basic_machine=mips-siemens ;; @@ -1043,10 +1211,6 @@ case $basic_machine in s390x | s390x-*) basic_machine=s390x-ibm ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; sb1) basic_machine=mipsisa64sb1-unknown ;; @@ -1055,105 +1219,32 @@ case $basic_machine in ;; sde) basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux + os=${os:-elf} ;; sequent) basic_machine=i386-sequent ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; sh5el) basic_machine=sh5le-unknown ;; - sh64) - basic_machine=sh64-unknown + sh5el-*) + basic_machine=sh5le-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; - sparclite-wrs | simso-wrs) + simso-wrs) basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 + os=vxworks ;; spur) basic_machine=spur-unknown ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 + basic_machine=arm-`echo "$basic_machine" | sed 's/^[^-]*-//'` ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 | sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos + tile*-*) ;; tile*) basic_machine=$basic_machine-unknown - os=-linux-gnu + os=${os:-linux-gnu} ;; tx39) basic_machine=mipstx39-unknown @@ -1161,146 +1252,32 @@ case $basic_machine in tx39el) basic_machine=mipstx39el-unknown ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; tower | tower-32) basic_machine=m68k-ncr ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; - vxworks960) - basic_machine=i960-wrs - 
os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; w65*) basic_machine=w65-wdc - os=-none + os=none ;; w89k-*) basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 + os=proelf ;; - xps | xps100) - basic_machine=xps100-honeywell + x64) + basic_machine=x86_64-pc ;; xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim + basic_machine=`echo "$basic_machine" | sed 's/^xscale/arm/'` ;; none) basic_machine=none-none - os=-none ;; -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. - w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': machine \`"$basic_machine"\' not recognized 1>&2 exit 1 ;; esac @@ -1308,10 +1285,10 @@ esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + basic_machine=`echo "$basic_machine" | sed 's/digital.*/dec/'` ;; *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + basic_machine=`echo "$basic_machine" | sed 's/commodore.*/cbm/'` ;; *) ;; @@ -1319,200 +1296,246 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if [ x"$os" != x"" ] +if [ x$os != x ] then case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux + # First match some system type aliases that might get confused + # with valid system types. + # solaris* is a basic system type, with this one exception. + auroraux) + os=auroraux ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` + bluegene*) + os=cnk ;; - -solaris) - os=-solaris2 + solaris1 | solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; - -svr4*) - os=-sysv4 + solaris) + os=solaris2 ;; - -unixware*) - os=-sysv4.2uw + unixware*) + os=sysv4.2uw ;; - -gnu/linux*) + gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; - # First accept the basic system types. 
+ # es1800 is here to avoid being matched by es* (a different OS) + es1800*) + os=ose + ;; + # Some version numbers need modification + chorusos*) + os=chorusos + ;; + isc) + os=isc2.2 + ;; + sco6) + os=sco5v6 + ;; + sco5) + os=sco3.2v5 + ;; + sco4) + os=sco3.2v4 + ;; + sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + ;; + sco3.2v[4-9]* | sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + ;; + scout) + # Don't match below + ;; + sco*) + os=sco3.2v2 + ;; + psos*) + os=psos + ;; + # Now accept the basic system types. # The portable systems comes first. - # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. - -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + # Each alternative MUST end in a * to match a version number. + # sysv* is not here because it comes later, after sysvr4. 
+ gnu* | bsd* | mach* | minix* | genix* | ultrix* | irix* \ + | *vms* | esix* | aix* | cnk* | sunos | sunos[34]*\ + | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ + | sym* | kopensolaris* | plan9* \ + | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ + | aos* | aros* | cloudabi* | sortix* \ + | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \ + | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \ + | knetbsd* | mirbsd* | netbsd* \ + | bitrig* | openbsd* | solidbsd* | libertybsd* \ + | ekkobsd* | kfreebsd* | freebsd* | riscix* | lynxos* \ + | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ + | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ + | udi* | eabi* | lites* | ieee* | go32* | aux* | hcos* \ + | chorusrdb* | cegcc* | glidix* \ + | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ + | midipix* | mingw32* | mingw64* | linux-gnu* | linux-android* \ + | linux-newlib* | linux-musl* | linux-uclibc* \ + | uxpv* | beos* | mpeix* | udk* | moxiebox* \ + | interix* | uwin* | mks* | rhapsody* | darwin* \ + | openstep* | oskit* | conix* | pw32* | nonstopux* \ + | storm-chaos* | tops10* | tenex* | tops20* | its* \ + | os2* | vos* | palmos* | uclinux* | nucleus* \ + | morphos* | superux* | rtmk* | windiss* \ + | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \ + | skyos* | haiku* | rdos* | toppers* | drops* | es* \ + | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ + | midnightbsd*) # Remember, each alternative MUST END IN *, to match a version number. ;; - -qnx*) + qnx*) case $basic_machine in x86-* | i*86-*) ;; *) - os=-nto$os + os=nto-$os ;; esac ;; - -nto-qnx*) + hiux*) + os=hiuxwe2 ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` + nto-qnx*) ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` + sim | xray | os68k* | v88r* \ + | windows* | osx | abug | netware* | os9* \ + | macos* | mpw* | magic* | mmixware* | mon960* | lnews*) ;; - -linux-dietlibc) - os=-linux-dietlibc + linux-dietlibc) + os=linux-dietlibc ;; - -linux*) + linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` + lynx*178) + os=lynxos178 ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` + lynx*5) + os=lynxos5 ;; - -opened*) - os=-openedition + lynx*) + os=lynxos ;; - -os400*) - os=-os400 + mac*) + os=`echo "$os" | sed -e 's|mac|macos|'` ;; - -wince*) - os=-wince + opened*) + os=openedition ;; - -osfrose*) - os=-osfrose + os400*) + os=os400 ;; - -osf*) - os=-osf + sunos5*) + os=`echo "$os" | sed -e 's|sunos5|solaris2|'` ;; - -utek*) - os=-bsd + sunos6*) + os=`echo "$os" | sed -e 's|sunos6|solaris3|'` ;; - -dynix*) - os=-bsd + wince*) + os=wince ;; - -acis*) - os=-aos + utek*) + os=bsd ;; - -atheos*) - os=-atheos + dynix*) + os=bsd ;; - -syllable*) - os=-syllable + acis*) + os=aos ;; - -386bsd) - os=-bsd + atheos*) + os=atheos ;; - -ctix* | -uts*) - os=-sysv + syllable*) + os=syllable ;; - -nova*) - os=-rtmk-nova + 386bsd) + os=bsd + ;; + ctix* | uts*) + os=sysv + ;; + nova*) + os=rtmk-nova ;; - -ns2 ) - os=-nextstep2 + ns2) + os=nextstep2 ;; - -nsk*) - os=-nsk + nsk*) + os=nsk ;; # Preserve the version number of sinix5. 
- -sinix5.*) + sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; - -sinix*) - os=-sysv4 + sinix*) + os=sysv4 ;; - -tpf*) - os=-tpf + tpf*) + os=tpf ;; - -triton*) - os=-sysv3 + triton*) + os=sysv3 ;; - -oss*) - os=-sysv3 + oss*) + os=sysv3 ;; - -svr4) - os=-sysv4 + svr4*) + os=sysv4 ;; - -svr3) - os=-sysv3 + svr3) + os=sysv3 ;; - -sysvr4) - os=-sysv4 + sysvr4) + os=sysv4 ;; - # This must come after -sysvr4. - -sysv*) + # This must come after sysvr4. + sysv*) ;; - -ose*) - os=-ose + ose*) + os=ose ;; - -es1800*) - os=-ose + *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) + os=mint ;; - -xenix) - os=-xenix + zvmoe) + os=zvmoe ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint + dicos*) + os=dicos ;; - -aros*) - os=-aros - ;; - -kaos*) - os=-kaos + pikeos*) + # Until real need of OS specific support for + # particular features comes up, bare metal + # configurations are quite functional. + case $basic_machine in + arm*) + os=eabi + ;; + *) + os=elf + ;; + esac ;; - -zvmoe) - os=-zvmoe + nacl*) ;; - -dicos*) - os=-dicos + ios) ;; - -nacl*) + none) ;; - -none) + *-eabi) ;; *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + echo Invalid configuration \`"$1"\': system \`"$os"\' not recognized 1>&2 exit 1 ;; esac @@ -1530,173 +1553,179 @@ else case $basic_machine in score-*) - os=-elf + os=elf ;; spu-*) - os=-elf + os=elf ;; *-acorn) - os=-riscix1.2 + os=riscix1.2 ;; arm*-rebel) - os=-linux + os=linux ;; arm*-semi) - os=-aout + os=aout ;; c4x-* | tic4x-*) - os=-coff + os=coff + ;; + c8051-*) + os=elf + ;; + clipper-intergraph) + os=clix ;; hexagon-*) - os=-elf + os=elf ;; tic54x-*) - os=-coff + os=coff ;; tic55x-*) - os=-coff + os=coff ;; tic6x-*) - os=-coff + os=coff ;; # This must come before the *-dec entry. pdp10-*) - os=-tops20 + os=tops20 ;; pdp11-*) - os=-none + os=none ;; *-dec | vax-*) - os=-ultrix4.2 + os=ultrix4.2 ;; m68*-apollo) - os=-domain + os=domain ;; i386-sun) - os=-sunos4.0.2 + os=sunos4.0.2 ;; m68000-sun) - os=-sunos3 + os=sunos3 ;; m68*-cisco) - os=-aout + os=aout ;; mep-*) - os=-elf + os=elf ;; mips*-cisco) - os=-elf + os=elf ;; mips*-*) - os=-elf + os=elf ;; or32-*) - os=-coff + os=coff ;; *-tti) # must be before sparc entry or we get the wrong os. 
- os=-sysv3 + os=sysv3 ;; sparc-* | *-sun) - os=-sunos4.1.1 + os=sunos4.1.1 ;; - *-be) - os=-beos + pru-*) + os=elf ;; - *-haiku) - os=-haiku + *-be) + os=beos ;; *-ibm) - os=-aix + os=aix ;; *-knuth) - os=-mmixware + os=mmixware ;; *-wec) - os=-proelf + os=proelf ;; *-winbond) - os=-proelf + os=proelf ;; *-oki) - os=-proelf + os=proelf ;; *-hp) - os=-hpux + os=hpux ;; *-hitachi) - os=-hiux + os=hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv + os=sysv ;; *-cbm) - os=-amigaos + os=amigaos ;; *-dg) - os=-dgux + os=dgux ;; *-dolphin) - os=-sysv3 + os=sysv3 ;; m68k-ccur) - os=-rtu + os=rtu ;; m88k-omron*) - os=-luna + os=luna ;; - *-next ) - os=-nextstep + *-next) + os=nextstep ;; *-sequent) - os=-ptx + os=ptx ;; *-crds) - os=-unos + os=unos ;; *-ns) - os=-genix + os=genix ;; i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 + os=mvs ;; *-gould) - os=-sysv + os=sysv ;; *-highlevel) - os=-bsd + os=bsd ;; *-encore) - os=-bsd + os=bsd ;; *-sgi) - os=-irix + os=irix ;; *-siemens) - os=-sysv4 + os=sysv4 ;; *-masscomp) - os=-rtu + os=rtu ;; f30[01]-fujitsu | f700-fujitsu) - os=-uxpv + os=uxpv ;; *-rom68k) - os=-coff + os=coff ;; *-*bug) - os=-coff + os=coff ;; *-apple) - os=-macos + os=macos ;; *-atari*) - os=-mint + os=mint + ;; + *-wrs) + os=vxworks ;; *) - os=-none + os=none ;; esac fi @@ -1707,79 +1736,82 @@ vendor=unknown case $basic_machine in *-unknown) case $os in - -riscix*) + riscix*) vendor=acorn ;; - -sunos*) + sunos*) vendor=sun ;; - -cnk*|-aix*) + cnk*|-aix*) vendor=ibm ;; - -beos*) + beos*) vendor=be ;; - -hpux*) + hpux*) vendor=hp ;; - -mpeix*) + mpeix*) vendor=hp ;; - -hiux*) + hiux*) vendor=hitachi ;; - -unos*) + unos*) vendor=crds ;; - -dgux*) + dgux*) vendor=dg ;; - -luna*) + luna*) vendor=omron ;; - -genix*) + genix*) vendor=ns ;; - -mvs* | -opened*) + clix*) + vendor=intergraph + ;; + mvs* | opened*) vendor=ibm ;; - -os400*) + os400*) vendor=ibm ;; - -ptx*) + ptx*) vendor=sequent ;; - -tpf*) + tpf*) vendor=ibm ;; - -vxsim* | -vxworks* | -windiss*) + vxsim* | vxworks* | windiss*) vendor=wrs ;; - -aux*) + aux*) vendor=apple ;; - -hms*) + hms*) vendor=hitachi ;; - -mpw* | -macos*) + mpw* | macos*) vendor=apple ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) vendor=atari ;; - -vos*) + vos*) vendor=stratus ;; esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + basic_machine=`echo "$basic_machine" | sed "s/unknown/$vendor/"` ;; esac -echo $basic_machine$os +echo "$basic_machine-$os" exit # Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) +# eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" diff --git a/configure.ac b/configure.ac index c41a83c9764..410b20972f2 100644 --- a/configure.ac +++ b/configure.ac @@ -64,6 +64,7 @@ AC_PROG_CXX AC_PROG_CPP AX_CXX_COMPILE_STDCXX_14 +AC_CHECK_TOOL([AR], [ar]) # Use 64-bit file system calls so that we can support files > 2 GiB. AC_SYS_LARGEFILE @@ -127,8 +128,6 @@ NEED_PROG(bzip2, bzip2) NEED_PROG(gzip, gzip) NEED_PROG(xz, xz) AC_PATH_PROG(dot, dot) -AC_PATH_PROG(pv, pv, pv) -AC_PATH_PROGS(brotli, brotli bro, bro) AC_PATH_PROG(lsof, lsof, lsof) @@ -152,9 +151,9 @@ PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"] # Look for libbz2, a required dependency. AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true], - [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. 
See http://www.bzip.org/.])]) + [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])]) AC_CHECK_HEADERS([bzlib.h], [true], - [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See http://www.bzip.org/.])]) + [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://web.archive.org/web/20180624184756/http://www.bzip.org/.])]) # Look for SQLite, a required dependency. @@ -164,6 +163,8 @@ PKG_CHECK_MODULES([SQLITE3], [sqlite3 >= 3.6.19], [CXXFLAGS="$SQLITE3_CFLAGS $CX # Look for libcurl, a required dependency. PKG_CHECK_MODULES([LIBCURL], [libcurl], [CXXFLAGS="$LIBCURL_CFLAGS $CXXFLAGS"]) +# Look for editline, a required dependency. +PKG_CHECK_MODULES([EDITLINE], [libeditline], [CXXFLAGS="$EDITLINE_CFLAGS $CXXFLAGS"]) # Look for libsodium, an optional dependency. PKG_CHECK_MODULES([SODIUM], [libsodium], @@ -179,12 +180,9 @@ AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt], [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])]) -# Look for libbrotli{enc,dec}, optional dependencies -PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], - [AC_DEFINE([HAVE_BROTLI], [1], [Whether to use libbrotli.]) - CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"] - have_brotli=1], [have_brotli=]) -AC_SUBST(HAVE_BROTLI, [$have_brotli]) +# Look for libbrotli{enc,dec}. +PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"]) + # Look for libseccomp, required for Linux sandboxing. if test "$sys_name" = linux; then @@ -232,12 +230,6 @@ if test "$gc" = yes; then fi -AC_ARG_ENABLE(init-state, AC_HELP_STRING([--disable-init-state], - [do not initialise DB etc. in `make install']), - init_state=$enableval, init_state=yes) -#AM_CONDITIONAL(INIT_STATE, test "$init_state" = "yes") - - # documentation generation switch AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen], [disable documentation generation]), @@ -260,11 +252,6 @@ if test "$(uname)" = "Darwin"; then fi -# Figure out the extension of dynamic libraries. -eval dynlib_suffix=$shrext_cmds -AC_SUBST(dynlib_suffix) - - # Do we have GNU tar? AC_MSG_CHECKING([if you have a recent GNU tar]) if $tar --version 2> /dev/null | grep -q GNU && tar cvf /dev/null --warning=no-timestamp ./config.log > /dev/null; then @@ -281,6 +268,15 @@ AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH], sandbox_shell=$withval) AC_SUBST(sandbox_shell) +AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared], + [Build shared libraries for Nix [default=yes]]), + shared=$enableval, shared=yes) +if test "$shared" = yes; then + AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.]) +else + AC_SUBST(BUILD_SHARED_LIBS, 0, [Whether to build shared libraries.]) +fi + # Expand all variables in config.status. test "$prefix" = NONE && prefix=$ac_default_prefix diff --git a/corepkgs/fetchurl.nix b/corepkgs/fetchurl.nix index 0ce1bab112f..a84777f5744 100644 --- a/corepkgs/fetchurl.nix +++ b/corepkgs/fetchurl.nix @@ -1,10 +1,14 @@ { system ? "" # obsolete , url +, hash ? "" # an SRI ash + +# Legacy hash specification , md5 ? "", sha1 ? "", sha256 ? "", sha512 ? "" , outputHash ? - if sha512 != "" then sha512 else if sha1 != "" then sha1 else if md5 != "" then md5 else sha256 + if hash != "" then hash else if sha512 != "" then sha512 else if sha1 != "" then sha1 else if md5 != "" then md5 else sha256 , outputHashAlgo ? 
- if sha512 != "" then "sha512" else if sha1 != "" then "sha1" else if md5 != "" then "md5" else "sha256" + if hash != "" then "" else if sha512 != "" then "sha512" else if sha1 != "" then "sha1" else if md5 != "" then "md5" else "sha256" + , executable ? false , unpack ? false , name ? baseNameOf (toString url) diff --git a/doc/manual/advanced-topics/advanced-topics.xml b/doc/manual/advanced-topics/advanced-topics.xml index 338aa6f3a23..b710f9f2b51 100644 --- a/doc/manual/advanced-topics/advanced-topics.xml +++ b/doc/manual/advanced-topics/advanced-topics.xml @@ -1,6 +1,7 @@ Advanced Topics diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml index 20fd6a0cfb0..bbb573e3540 100644 --- a/doc/manual/advanced-topics/distributed-builds.xml +++ b/doc/manual/advanced-topics/distributed-builds.xml @@ -81,6 +81,7 @@ or a newline, e.g. Each machine specification consists of the following elements, separated by spaces. Only the first element is required. +To leave a field at its default, set it to -. diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml index 1865bb37c86..f0da1f612fe 100644 --- a/doc/manual/command-ref/conf-file.xml +++ b/doc/manual/command-ref/conf-file.xml @@ -135,7 +135,6 @@ false. - builders @@ -159,7 +158,6 @@ false. - build-users-group This options specifies the Unix group containing @@ -210,7 +208,6 @@ false. - connect-timeout @@ -243,7 +240,6 @@ false. - extra-sandbox-paths @@ -283,7 +279,6 @@ false. - fallback If set to true, Nix will fall @@ -293,7 +288,6 @@ false. - fsync-metadata If set to true, changes to the @@ -304,7 +298,6 @@ false. - hashed-mirrors A list of web servers used by @@ -367,10 +360,8 @@ builtins.fetchurl { options a store path was built), so by default this option is on. Turn it off to save a bit of disk space (or a lot if keep-outputs is also turned on). - - keep-env-derivations If false (default), derivations @@ -394,7 +385,6 @@ builtins.fetchurl { - keep-outputs If true, the garbage collector @@ -408,10 +398,8 @@ builtins.fetchurl { only at build time (e.g., the C compiler, or source tarballs downloaded from the network). To prevent it from doing so, set this option to true. - - max-build-log-size @@ -437,14 +425,15 @@ builtins.fetchurl { This option defines the maximum number of jobs that Nix will try to build in parallel. The default is 1. The special value auto - causes Nix to use the number of CPUs in your system. It can be + causes Nix to use the number of CPUs in your system. 0 + is useful when using remote builders to prevent any local builds (except for + preferLocalBuild derivation attribute which executes locally + regardless). It can be overridden using the () command line switch. - - max-silent-time @@ -524,7 +513,12 @@ password my-password For the exact syntax, see the - curl documentation. + curl documentation. + + This must be an absolute path, and ~ + is not resolved. For example, ~/.netrc won't + resolve to your home directory's .netrc. + @@ -595,7 +589,6 @@ password my-password - repeat How many times to repeat builds to check whether @@ -607,7 +600,6 @@ password my-password - require-sigs If set to true (the default), @@ -665,13 +657,13 @@ password my-password __noChroot attribute set to true do not run in sandboxes. - The default is false. + The default is true on Linux and + false on all other platforms. 
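The corepkgs/fetchurl.nix hunk above introduces an SRI-style hash argument next to the legacy md5/sha1/sha256/sha512 ones, and deliberately leaves outputHashAlgo empty when it is used, since the algorithm is already encoded in the SRI string. A minimal sketch of exercising the new argument from the command line; the URL and the SRI value are placeholders of mine, not values taken from this patch:

# Hypothetical usage of the new SRI hash argument; substitute a real URL and
# its real SRI hash (the value below is the sha256 SRI of empty input, used
# purely as a syntactically valid placeholder).
nix-build --expr 'import <nix/fetchurl.nix> {
  url  = "https://example.org/source.tar.gz";
  hash = "sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=";
}'

If the downloaded file does not match the given hash, the fixed-output check fails exactly as it would with the older sha256-style arguments.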
- sandbox-dev-shm-size This option determines the maximum size of the @@ -737,7 +729,6 @@ password my-password - substituters A list of URLs of substituters, separated by @@ -746,7 +737,6 @@ password my-password - system This option specifies the canonical Nix system @@ -768,6 +758,33 @@ password my-password + system-features + + A set of system “features” supported by this + machine, e.g. kvm. Derivations can express a + dependency on such features through the derivation attribute + requiredSystemFeatures. For example, the + attribute + + +requiredSystemFeatures = [ "kvm" ]; + + + ensures that the derivation can only be built on a machine with + the kvm feature. + + This setting by default includes kvm if + /dev/kvm is accessible, and the + pseudo-features nixos-test, + benchmark and big-parallel + that are used in Nixpkgs to route builds to specific + machines. + + + + + + timeout @@ -787,7 +804,6 @@ password my-password - trusted-public-keys A whitespace-separated list of public keys. When @@ -798,7 +814,6 @@ password my-password - trusted-substituters A list of URLs of substituters, separated by @@ -811,7 +826,6 @@ password my-password - trusted-users @@ -837,8 +851,177 @@ password my-password + + + + Deprecated Settings + + + + + + + binary-caches + + Deprecated: + binary-caches is now an alias to + . + + + + binary-cache-public-keys + + Deprecated: + binary-cache-public-keys is now an alias to + . + + + + build-compress-log + + Deprecated: + build-compress-log is now an alias to + . + + + + build-cores + Deprecated: + build-cores is now an alias to + . + + + + build-extra-chroot-dirs + + Deprecated: + build-extra-chroot-dirs is now an alias to + . + + + + build-extra-sandbox-paths + + Deprecated: + build-extra-sandbox-paths is now an alias to + . + + + + build-fallback + + Deprecated: + build-fallback is now an alias to + . + + + + build-max-jobs + + Deprecated: + build-max-jobs is now an alias to + . + + + + build-max-log-size + + Deprecated: + build-max-log-size is now an alias to + . + + + + build-max-silent-time + + Deprecated: + build-max-silent-time is now an alias to + . + + + + build-repeat + + Deprecated: + build-repeat is now an alias to + . + + + + build-timeout + + Deprecated: + build-timeout is now an alias to + . + + + + build-use-chroot + + Deprecated: + build-use-chroot is now an alias to + . + + + + build-use-sandbox + + Deprecated: + build-use-sandbox is now an alias to + . + + + + build-use-substitutes + + Deprecated: + build-use-substitutes is now an alias to + . + + + + gc-keep-derivations + + Deprecated: + gc-keep-derivations is now an alias to + . + + + + gc-keep-outputs + + Deprecated: + gc-keep-outputs is now an alias to + . + + + + env-keep-derivations + + Deprecated: + env-keep-derivations is now an alias to + . + + + + extra-binary-caches + + Deprecated: + extra-binary-caches is now an alias to + . + + + + trusted-binary-caches + + Deprecated: + trusted-binary-caches is now an alias to + . + + + diff --git a/doc/manual/command-ref/nix-copy-closure.xml b/doc/manual/command-ref/nix-copy-closure.xml index 800e1db6ab0..e6dcf180ad6 100644 --- a/doc/manual/command-ref/nix-copy-closure.xml +++ b/doc/manual/command-ref/nix-copy-closure.xml @@ -95,15 +95,6 @@ those paths. 
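The system-features entry added above shows the requiredSystemFeatures attribute in isolation; as a hedged sketch, this is what a complete derivation gated on the kvm feature could look like. The name, the /bin/sh builder and the echo command are illustrative choices of mine, not part of the patch:

# Illustrative only: a derivation that will only be scheduled on a machine
# whose system-features setting advertises "kvm".
nix-build --expr 'derivation {
  name = "needs-kvm";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "echo ok > $out" ];
  requiredSystemFeatures = [ "kvm" ];
}'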
If this bothers you, use - - Also copy the outputs of store derivations diff --git a/doc/manual/command-ref/nix-env.xml b/doc/manual/command-ref/nix-env.xml index eac7739558b..56c466268ea 100644 --- a/doc/manual/command-ref/nix-env.xml +++ b/doc/manual/command-ref/nix-env.xml @@ -1346,11 +1346,12 @@ $ nix-env --list-generations This operation deletes the specified generations of the current profile. The generations can be a list of generation numbers, the special value old to delete all non-current -generations, or a value such as 30d to delete all +generations, a value such as 30d to delete all generations older than the specified number of days (except for the -generation that was active at that point in time). -Periodically deleting old generations is important to make garbage -collection effective. +generation that was active at that point in time), or a value such as. ++5 to only keep the specified items older than the +current generation. Periodically deleting old generations is important +to make garbage collection effective. @@ -1359,6 +1360,8 @@ collection effective. $ nix-env --delete-generations 3 4 8 +$ nix-env --delete-generations +5 + $ nix-env --delete-generations 30d $ nix-env -p other_profile --delete-generations old @@ -1458,7 +1461,7 @@ error: no generation older than the current (91) exists Environment variables - + NIX_PROFILE Location of the Nix profile. Defaults to the @@ -1472,6 +1475,6 @@ error: no generation older than the current (91) exists - + diff --git a/doc/manual/command-ref/nix-instantiate.xml b/doc/manual/command-ref/nix-instantiate.xml index 39c1282fcc3..53f06aed124 100644 --- a/doc/manual/command-ref/nix-instantiate.xml +++ b/doc/manual/command-ref/nix-instantiate.xml @@ -154,7 +154,9 @@ input. When used with , perform evaluation in read/write mode so nix language features that require it will still work (at the cost of needing to do - instantiation of every evaluated derivation). + instantiation of every evaluated derivation). If this option is + not enabled, there may be uninstantiated store paths in the final + output. diff --git a/doc/manual/command-ref/nix-shell.xml b/doc/manual/command-ref/nix-shell.xml index 62d026ac238..bb4a4e42012 100644 --- a/doc/manual/command-ref/nix-shell.xml +++ b/doc/manual/command-ref/nix-shell.xml @@ -32,6 +32,7 @@ cmd regexp + name @@ -165,6 +166,13 @@ also . + name + + When a shell is started, + keep the listed environment variables. + + + The following common options are supported: @@ -309,13 +317,28 @@ while (my $token = $p->get_tag("a")) { -Finally, the following Haskell script uses a specific branch of -Nixpkgs/NixOS (the 14.12 stable branch): +Sometimes you need to pass a simple Nix expression to customize +a package like Terraform: + + + +You must use double quotes (") when +passing a simple Nix expression in a nix-shell shebang. + + +Finally, using the merging of multiple nix-shell shebangs the +following Haskell script uses a specific branch of Nixpkgs/NixOS (the +18.03 stable branch): with , if an output path is not identical to the corresponding output from the previous build, the new output path is left in - /nix/store/name-check. + /nix/store/name.check. See also the configuration option, which repeats a derivation a number of times and prevents @@ -275,7 +275,7 @@ as a means of providing Nix store access to a restricted ssh user. Allow the connected client to request the realization of derivations. In effect, this can be used to make the host act - as a build slave. + as a remote builder. 
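The --check behaviour described above (a divergent rebuild being left in /nix/store/name.check) can be tried directly against an already-built derivation; a minimal sketch, assuming a <nixpkgs> channel is available:

$ nix-build '<nixpkgs>' -A hello
$ nix-build '<nixpkgs>' -A hello --check

If the second build does not reproduce the first one bit-for-bit, the differing output is kept next to the original under the .check suffix.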
@@ -679,6 +679,18 @@ query is applied to the target of the symlink. + + + Prints the references graph of the store paths + paths in the GraphML file format. + This can be used to visualise dependency graphs. To obtain a + build-time dependency graph, apply this to a store derivation. To + obtain a runtime dependency graph, apply it to an output + path. + + + name name @@ -1270,6 +1282,7 @@ ktorrent-2.2.1/NEWS nix-store + paths @@ -1280,6 +1293,13 @@ Nix database to standard output. It can be loaded into an empty Nix store using . This is useful for making backups and when migrating to different database schemas. +By default, will dump the entire Nix +database. When one or more store paths is passed, only the subset of +the Nix database for those store paths is dumped. As with +, the user is responsible for passing all the +store paths for a closure. See for an +example. + diff --git a/doc/manual/expressions/advanced-attributes.xml b/doc/manual/expressions/advanced-attributes.xml index f3cf9837130..07b0d97d3f7 100644 --- a/doc/manual/expressions/advanced-attributes.xml +++ b/doc/manual/expressions/advanced-attributes.xml @@ -50,6 +50,40 @@ allowedRequisites = [ foobar ]; + disallowedReferences + + The optional attribute + disallowedReferences specifies a list of illegal + references (dependencies) of the output of the builder. For + example, + + +disallowedReferences = [ foo ]; + + + enforces that the output of a derivation cannot have a direct runtime + dependencies on the derivation foo. + + + + + disallowedRequisites + + This attribute is similar to + disallowedReferences, but it specifies illegal + requisites for the whole closure, so all the dependencies + recursively. For example, + + +disallowedRequisites = [ foobar ]; + + + enforces that the output of a derivation cannot have any + runtime dependency on foobar or any other derivation + depending recursively on foobar. + + + exportReferencesGraph @@ -112,7 +146,13 @@ impureEnvVars = [ "http_proxy" "https_proxy" ... ]; linkend="fixed-output-drvs">fixed-output derivations, where impurities such as these are okay since (the hash of) the output is known in advance. It is ignored for all other - derivations. + derivations. + + impureEnvVars implementation takes + environment variables from the current builder process. When a daemon is + building its environmental variables are used. Without the daemon, the + environmental variables come from the environment of the + nix-build. @@ -176,7 +216,7 @@ fetchurl { { stdenv, curl }: # The curl program is used for downloading. -{ url, md5 }: +{ url, sha256 }: stdenv.mkDerivation { name = baseNameOf (toString url); @@ -184,10 +224,10 @@ stdenv.mkDerivation { buildInputs = [ curl ]; # This is a fixed-output derivation; the output must be a regular - # file with MD5 hash md5. + # file with SHA256 hash sha256. outputHashMode = "flat"; - outputHashAlgo = "md5"; - outputHash = md5; + outputHashAlgo = "sha256"; + outputHash = sha256; inherit url; } @@ -197,8 +237,8 @@ stdenv.mkDerivation { The outputHashAlgo attribute specifies the hash algorithm used to compute the hash. It can currently be - "md5", "sha1" or - "sha256". + "sha1", "sha256" or + "sha512". The outputHashMode attribute determines how the hash is computed. It must be one of the following two @@ -211,7 +251,7 @@ stdenv.mkDerivation { The output must be a non-executable regular file. If it isn’t, the build fails. 
The hash is simply computed over the contents of that file (so it’s equal to what - Unix commands like md5sum or + Unix commands like sha256sum or sha1sum produce). This is the default. @@ -272,9 +312,7 @@ big = "a very long string"; preferLocalBuild If this attribute is set to - true, it has two effects. First, the - derivation will always be built, not substituted, even if a - substitute is available. Second, if true and distributed building is enabled, then, if possible, the derivaton will be built locally instead of forwarded to a remote machine. This is @@ -284,6 +322,19 @@ big = "a very long string"; + + allowSubstitutes + + If this attribute is set to + false, then Nix will always build this + derivation; it will not try to substitute its outputs. This is + useful for very trivial derivations (such as + writeText in Nixpkgs) that are cheaper to + build than to substitute from a binary cache. + + + + diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml index ac1fe7e2faf..22d998bd0ed 100644 --- a/doc/manual/expressions/builtins.xml +++ b/doc/manual/expressions/builtins.xml @@ -21,7 +21,8 @@ available as builtins.derivation. - abort s + + abort s Abort Nix expression evaluation, print error message s. @@ -29,8 +30,10 @@ available as builtins.derivation. - builtins.add - e1 e2 + + builtins.add + e1 e2 + Return the sum of the numbers e1 and @@ -39,8 +42,9 @@ available as builtins.derivation. - builtins.all - pred list + + builtins.all + pred list Return true if the function pred returns true @@ -50,8 +54,9 @@ available as builtins.derivation. - builtins.any - pred list + + builtins.any + pred list Return true if the function pred returns true @@ -61,8 +66,9 @@ available as builtins.derivation. - builtins.attrNames - set + + builtins.attrNames + set Return the names of the attributes in the set set in an alphabetically sorted list. For instance, @@ -72,8 +78,9 @@ available as builtins.derivation. - builtins.attrValues - set + + builtins.attrValues + set Return the values of the attributes in the set set in the order corresponding to the @@ -82,7 +89,8 @@ available as builtins.derivation. - baseNameOf s + + baseNameOf s Return the base name of the string s, that is, everything following @@ -92,7 +100,41 @@ available as builtins.derivation. - builtins + + builtins.bitAnd + e1 e2 + + Return the bitwise AND of the integers + e1 and + e2. + + + + + + builtins.bitOr + e1 e2 + + Return the bitwise OR of the integers + e1 and + e2. + + + + + + builtins.bitXor + e1 e2 + + Return the bitwise XOR of the integers + e1 and + e2. + + + + + + builtins The set builtins contains all the built-in functions and values. You can use @@ -109,8 +151,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.compareVersions - s1 s2 + + builtins.compareVersions + s1 s2 Compare two strings representing versions and return -1 if version @@ -126,8 +169,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.splitVersion - s + + builtins.splitVersion + s Split a string representing a version into its components, by the same version splitting logic underlying the @@ -137,16 +181,18 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.concatLists - lists + + builtins.concatLists + lists Concatenate a list of lists into a single list. - builtins.concatStringsSep - separator list + + builtins.concatStringsSep + separator list Concatenate a list of strings with a separator between each element, e.g. 
concatStringsSep "/" @@ -154,8 +200,8 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.currentSystem + + builtins.currentSystem The built-in value currentSystem evaluates to the Nix platform identifier for the Nix installation @@ -188,8 +234,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" --> - builtins.deepSeq - e1 e2 + + builtins.deepSeq + e1 e2 This is like seq e1 @@ -201,8 +248,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - derivation - attrs + + derivation + attrs derivation is described in . @@ -210,7 +258,8 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - dirOf s + + dirOf s Return the directory part of the string s, that is, everything before the final @@ -220,8 +269,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.div - e1 e2 + + builtins.div + e1 e2 Return the quotient of the numbers e1 and @@ -229,8 +279,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.elem - x xs + + builtins.elem + x xs Return true if a value equal to x occurs in the list @@ -240,8 +291,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.elemAt - xs n + + builtins.elemAt + xs n Return element n from the list xs. Elements are counted @@ -251,8 +303,9 @@ if builtins ? getEnv then builtins.getEnv "PATH" else "" - builtins.fetchurl - url + + builtins.fetchurl + url Download the specified URL and return the path of the downloaded file. This function is not available if - fetchTarball - url + + fetchTarball + url Download the specified URL, unpack it and return the path of the unpacked tree. The file must be a tape archive @@ -280,8 +334,17 @@ with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixo stdenv.mkDerivation { … } + - Note that when obtaining the hash with nix-prefetch-url + The fetched tarball is cached for a certain amount of time + (1 hour by default) in ~/.cache/nix/tarballs/. + You can change the cache timeout either on the command line with + or + in the Nix configuration file with this option: + tarball-ttl number of seconds to cache. + + + Note that when obtaining the hash with nix-prefetch-url the option --unpack is required. @@ -307,7 +370,7 @@ stdenv.mkDerivation { … } - + builtins.fetchGit args @@ -359,6 +422,84 @@ stdenv.mkDerivation { … } + + + Fetching a private repository over SSH + builtins.fetchGit { + url = "git@github.com:my-secret/repository.git"; + ref = "master"; + rev = "adab8b916a45068c044658c4158d81878f9ed1c3"; +} + + + + Fetching a repository's specific commit on an arbitrary branch + + If the revision you're looking for is in the default branch + of the git repository you don't strictly need to specify + the branch name in the ref attribute. + + + However, if the revision you're looking for is in a future + branch for the non-default branch you will need to specify + the the ref attribute as well. + + builtins.fetchGit { + url = "https://github.com/nixos/nix.git"; + rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452"; + ref = "1.11-maintenance"; +} + + + It is nice to always specify the branch which a revision + belongs to. Without the branch being specified, the + fetcher might fail if the default branch changes. + Additionally, it can be confusing to try a commit from a + non-default branch and see the fetch fail. If the branch + is specified the fault is much more obvious. 
+ + + + + + Fetching a repository's specific commit on the default branch + + If the revision you're looking for is in the default branch + of the git repository you may omit the + ref attribute. + + builtins.fetchGit { + url = "https://github.com/nixos/nix.git"; + rev = "841fcbd04755c7a2865c51c1e2d3b045976b7452"; +} + + + + Fetching a tag + builtins.fetchGit { + url = "https://github.com/nixos/nix.git"; + ref = "tags/1.9"; +} + Due to a bug (#2385), + only non-annotated tags can be fetched. + + + + Fetching the latest version of a remote branch + + builtins.fetchGit can behave impurely + fetch the latest version of a remote branch. + + Nix will refetch the branch in accordance to + . + This behavior is disabled in + Pure evaluation mode. + builtins.fetchGit { + url = "ssh://git@github.com/nixos/nix.git"; + ref = "master"; +} + @@ -420,14 +561,17 @@ stdenv.mkDerivation { "unknown" (for other kinds of files such as device nodes or fifos — but note that those cannot be copied to the Nix store, so if the predicate returns - true for them, the copy will fail). + true for them, the copy will fail). If you + exclude a directory, the entire corresponding subtree of + e2 will be excluded. - builtins.foldl’ + + builtins.foldl’ op nul list Reduce a list by applying a binary operator, from @@ -440,7 +584,8 @@ stdenv.mkDerivation { - builtins.functionArgs + + builtins.functionArgs f @@ -458,7 +603,8 @@ stdenv.mkDerivation { - builtins.fromJSON e + + builtins.fromJSON e Convert a JSON string to a Nix value. For example, @@ -468,14 +614,14 @@ builtins.fromJSON ''{"x": [1, 2, 3], "y": null}'' returns the value { x = [ 1 2 3 ]; y = null; - }. Floating point numbers are not - supported. + }. - builtins.genList - generator length + + builtins.genList + generator length Generate list of size length, with each element @@ -492,8 +638,9 @@ builtins.genList (x: x * x) 5 - builtins.getAttr - s set + + builtins.getAttr + s set getAttr returns the attribute named s from @@ -505,8 +652,9 @@ builtins.genList (x: x * x) 5 - builtins.getEnv - s + + builtins.getEnv + s getEnv returns the value of the environment variable s, or an empty @@ -523,8 +671,9 @@ builtins.genList (x: x * x) 5 - builtins.hasAttr - s set + + builtins.hasAttr + s set hasAttr returns true if set has an @@ -537,20 +686,22 @@ builtins.genList (x: x * x) 5 - builtins.hashString - type s + + builtins.hashString + type s Return a base-16 representation of the cryptographic hash of string s. The hash algorithm specified by type must - be one of "md5", "sha1" or - "sha256". + be one of "md5", "sha1", + "sha256" or "sha512". - builtins.head - list + + builtins.head + list Return the first element of a list; abort evaluation if the argument isn’t a list or is an empty list. You @@ -560,8 +711,9 @@ builtins.genList (x: x * x) 5 - import - path + + import + path Load, parse and return the Nix expression in the file path. 
If path @@ -615,8 +767,9 @@ x: x + 456 - builtins.intersectAttrs - e1 e2 + + builtins.intersectAttrs + e1 e2 Return a set consisting of the attributes in the set e2 that also exist in the set @@ -625,8 +778,9 @@ x: x + 456 - builtins.isAttrs - e + + builtins.isAttrs + e Return true if e evaluates to a set, and @@ -635,8 +789,9 @@ x: x + 456 - builtins.isList - e + + builtins.isList + e Return true if e evaluates to a list, and @@ -645,7 +800,7 @@ x: x + 456 - builtins.isFunction + builtins.isFunction e Return true if @@ -655,8 +810,9 @@ x: x + 456 - builtins.isString - e + + builtins.isString + e Return true if e evaluates to a string, and @@ -665,8 +821,9 @@ x: x + 456 - builtins.isInt - e + + builtins.isInt + e Return true if e evaluates to an int, and @@ -675,8 +832,9 @@ x: x + 456 - builtins.isFloat - e + + builtins.isFloat + e Return true if e evaluates to a float, and @@ -685,8 +843,9 @@ x: x + 456 - builtins.isBool - e + + builtins.isBool + e Return true if e evaluates to a bool, and @@ -695,8 +854,9 @@ x: x + 456 - isNull - e + + isNull + e Return true if e evaluates to null, @@ -710,8 +870,9 @@ x: x + 456 - builtins.length - e + + builtins.length + e Return the length of the list e. @@ -719,8 +880,9 @@ x: x + 456 - builtins.lessThan - e1 e2 + + builtins.lessThan + e1 e2 Return true if the number e1 is less than the number @@ -732,8 +894,9 @@ x: x + 456 - builtins.listToAttrs - e + + builtins.listToAttrs + e Construct a set from a list specifying the names and values of each attribute. Each element of the list should be @@ -759,8 +922,9 @@ builtins.listToAttrs - map - f list + + map + f list Apply the function f to each element in the list list. For @@ -775,14 +939,15 @@ map (x: "foo" + x) [ "bar" "bla" "abc" ] - builtins.match - regex str + + builtins.match + regex str - Returns a list if the extended - POSIX regular expression regex - matches str precisely, otherwise returns - null. Each item in the list is a regex group. + Returns a list if the extended + POSIX regular expression regex + matches str precisely, otherwise returns + null. Each item in the list is a regex group. builtins.match "ab" "abc" @@ -808,11 +973,12 @@ builtins.match "[[:space:]]+([[:upper:]]+)[[:space:]]+" " FOO " Evaluates to [ "foo" ]. - + - builtins.mul - e1 e2 + + builtins.mul + e1 e2 Return the product of the numbers e1 and @@ -821,8 +987,9 @@ Evaluates to [ "foo" ]. - builtins.parseDrvName - s + + builtins.parseDrvName + s Split the string s into a package name and version. The package name is everything up to @@ -835,7 +1002,7 @@ Evaluates to [ "foo" ]. - + builtins.path args @@ -905,32 +1072,20 @@ Evaluates to [ "foo" ]. - builtins.pathExists - path + + builtins.pathExists + path Return true if the path - path exists, and - false otherwise. One application of this - function is to conditionally include a Nix expression containing - user configuration: - - -let - fileName = builtins.getEnv "CONFIG_FILE"; - config = - if fileName != "" && builtins.pathExists (builtins.toPath fileName) - then import (builtins.toPath fileName) - else { someSetting = false; }; # default configuration -in config.someSetting - - (Note that CONFIG_FILE must be an absolute path for - this to work.) + path exists at evaluation time, and + false otherwise. 
- builtins.readDir - path + + builtins.readDir + path Return the contents of the directory path as a set mapping directory entries @@ -951,8 +1106,9 @@ in config.someSetting - builtins.readFile - path + + builtins.readFile + path Return the contents of the file path as a string. @@ -960,8 +1116,9 @@ in config.someSetting - removeAttrs - set list + + removeAttrs + set list Remove the attributes listed in list from @@ -976,8 +1133,9 @@ removeAttrs { x = 1; y = 2; z = 3; } [ "a" "x" "z" ] - builtins.replaceStrings - from to s + + builtins.replaceStrings + from to s Given string s, replace every occurrence of the strings in from @@ -993,8 +1151,9 @@ builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar" - builtins.seq - e1 e2 + + builtins.seq + e1 e2 Evaluate e1, then evaluate and return e2. This ensures @@ -1004,8 +1163,9 @@ builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar" - builtins.sort - comparator list + + builtins.sort + comparator list Return list in sorted order. It repeatedly calls the function @@ -1027,15 +1187,16 @@ builtins.sort builtins.lessThan [ 483 249 526 147 42 77 ] - builtins.split - regex str + + builtins.split + regex str - Returns a list composed of non matched strings interleaved - with the lists of the extended - POSIX regular expression regex matches - of str. Each item in the lists of matched - sequences is a regex group. + Returns a list composed of non matched strings interleaved + with the lists of the extended + POSIX regular expression regex matches + of str. Each item in the lists of matched + sequences is a regex group. builtins.split "(a)b" "abc" @@ -1061,11 +1222,12 @@ builtins.split "([[:upper:]]+)" " FOO " Evaluates to [ " " [ "FOO" ] " " ]. - + - builtins.stringLength - e + + builtins.stringLength + e Return the length of the string e. If e is @@ -1074,8 +1236,9 @@ Evaluates to [ " " [ "FOO" ] " " ]. - builtins.sub - e1 e2 + + builtins.sub + e1 e2 Return the difference between the numbers e1 and @@ -1084,9 +1247,10 @@ Evaluates to [ " " [ "FOO" ] " " ]. - builtins.substring - start len - s + + builtins.substring + start len + s Return the substring of s from character position @@ -1109,8 +1273,9 @@ builtins.substring 0 3 "nixos" - builtins.tail - list + + builtins.tail + list Return the second to last elements of a list; abort evaluation if the argument isn’t a list or is an empty @@ -1119,8 +1284,9 @@ builtins.substring 0 3 "nixos" - throw - s + + throw + s Throw an error message s. This usually aborts Nix expression @@ -1133,9 +1299,10 @@ builtins.substring 0 3 "nixos" - builtins.toFile - name s + + builtins.toFile + name + s Store the string s in a file in the Nix store and return its path. The file has suffix @@ -1204,14 +1371,15 @@ in foo This is not allowed because it would cause a cyclic dependency in the computation of the cryptographic hashes for foo and bar. - It is also not possible to reference the result of a derivation. - If you are using Nixpkgs, the writeTextFile function is able to + It is also not possible to reference the result of a derivation. + If you are using Nixpkgs, the writeTextFile function is able to do that. - builtins.toJSON e + + builtins.toJSON e Return a string containing a JSON representation of e. Strings, integers, floats, booleans, @@ -1224,20 +1392,19 @@ in foo - builtins.toPath s + + builtins.toPath s - Convert the string value - s into a path value. The string - s must represent an absolute path - (i.e., must start with /). The path need not - exist. 
The resulting path is canonicalised, e.g., - builtins.toPath "//foo/xyzzy/../bar/" returns - /foo/bar. + DEPRECATED. Use /. + "/path" + to convert a string into an absolute path. For relative paths, + use ./. + "/path". + - toString e + + toString e Convert the expression e to a string. @@ -1256,7 +1423,8 @@ in foo - builtins.toXML e + + builtins.toXML e Return a string containing an XML representation of e. The main application for @@ -1312,7 +1480,7 @@ stdenv.mkDerivation (rec { builder = builtins.toFile "builder.sh" " source $stdenv/setup mkdir $out - echo $servlets | xsltproc ${stylesheet} - > $out/server-conf.xml]]> $out/server-conf.xml]]> - builtins.trace - e1 e2 + + builtins.trace + e1 e2 Evaluate e1 and print its abstract syntax representation on standard error. Then return @@ -1381,8 +1550,9 @@ stdenv.mkDerivation (rec { - builtins.tryEval - e + + builtins.tryEval + e Try to evaluate e. Return a set containing the attributes success @@ -1395,8 +1565,9 @@ stdenv.mkDerivation (rec { - builtins.typeOf - e + + builtins.typeOf + e Return a string representing the type of the value e, namely "int", diff --git a/doc/manual/expressions/language-constructs.xml b/doc/manual/expressions/language-constructs.xml index 47d95f8a13e..f961ed921bc 100644 --- a/doc/manual/expressions/language-constructs.xml +++ b/doc/manual/expressions/language-constructs.xml @@ -41,7 +41,7 @@ encountered).. -Let-expressions +Let-expressions A let-expression allows you define local variables for an expression. For instance, diff --git a/doc/manual/glossary/glossary.xml b/doc/manual/glossary/glossary.xml index 4977825578f..e3162ed8d46 100644 --- a/doc/manual/glossary/glossary.xml +++ b/doc/manual/glossary/glossary.xml @@ -1,5 +1,6 @@ + xmlns:xlink="http://www.w3.org/1999/xlink" + xml:id="part-glossary"> Glossary diff --git a/doc/manual/hacking.xml b/doc/manual/hacking.xml index 183aed7adff..b671811d3a3 100644 --- a/doc/manual/hacking.xml +++ b/doc/manual/hacking.xml @@ -30,7 +30,7 @@ To build Nix itself in this shell: [nix-shell]$ configurePhase [nix-shell]$ make -To install it in $(pwd)/nix and test it: +To install it in $(pwd)/inst and test it: [nix-shell]$ make install [nix-shell]$ make installcheck diff --git a/doc/manual/installation/env-variables.xml b/doc/manual/installation/env-variables.xml index fc39cdd9dfe..d1ee0bb2e09 100644 --- a/doc/manual/installation/env-variables.xml +++ b/doc/manual/installation/env-variables.xml @@ -21,4 +21,51 @@ in your ~/.profile (or similar), like this: source prefix/etc/profile.d/nix.sh - \ No newline at end of file +
+
+<envar>NIX_SSL_CERT_FILE</envar>
+
+If you need to specify a custom certificate bundle to account
+for an HTTPS-intercepting man-in-the-middle proxy, you must specify
+the path to the certificate bundle in the environment variable
+NIX_SSL_CERT_FILE.
+
+If you don't specify a NIX_SSL_CERT_FILE
+manually, Nix will install and use its own certificate
+bundle.
+
+ Set the environment variable and install Nix
+
+$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+$ sh <(curl https://nixos.org/nix/install)
+
+
+ In the shell profile and rc files (for example,
+ /etc/bashrc, /etc/zshrc),
+ add the following line:
+
+export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
+
+
+
+
+You must not add the export and then do the install, as
+the Nix installer will detect the presence of Nix configuration, and
+abort.
+
+<envar>NIX_SSL_CERT_FILE</envar> with macOS and the Nix daemon + +On macOS you must specify the environment variable for the Nix +daemon service, then restart it: + + +$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt +$ sudo launchctl kickstart -k system/org.nixos.nix-daemon + +
+ +
+
+
diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml
index 7e8dfb0db3d..394d8053b94 100644
--- a/doc/manual/installation/installing-binary.xml
+++ b/doc/manual/installation/installing-binary.xml
@@ -6,13 +6,30 @@
 Installing a Binary Distribution
 
-If you are using Linux or macOS, the easiest way to install
-Nix is to run the following command:
+If you are using Linux or macOS, the easiest way to install Nix
+is to run the following command:
 
-$ bash <(curl https://nixos.org/nix/install)
+  $ sh <(curl https://nixos.org/nix/install)
 
+As of Nix 2.1.0, the Nix installer will always default to creating a
+single-user installation; however, opting in to the multi-user
+installation is highly recommended.
+
+
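For reference, the multi-user installation mentioned here is selected by passing the --daemon flag to the same installer (see the 2.1 release notes later in this patch):

$ sh <(curl https://nixos.org/nix/install) --daemon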
+ Single User Installation + + + To explicitly select a single-user installation on your system: + + + sh <(curl https://nixos.org/nix/install) --no-daemon + + + + This will perform a single-user installation of Nix, meaning that /nix is owned by the invoking user. You should run this under your usual user account, not as @@ -33,58 +50,141 @@ and .profile to source the NIX_INSTALLER_NO_MODIFY_PROFILE environment variable before executing the install script to disable this behaviour. - - + There may also be references to Nix in + /etc/profile, + /etc/bashrc, and + /etc/zshrc which you may remove. + -You can also download a binary tarball that contains Nix and all -its dependencies. (This is what the install script at -https://nixos.org/nix/install does automatically.) You -should unpack it somewhere (e.g. in /tmp), and -then run the script named install inside the binary -tarball: +
- -alice$ cd /tmp -alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2 -alice$ cd nix-1.8-x86_64-darwin -alice$ ./install - +
+ Installing a pinned Nix version from a URL
-
+  NixOS.org hosts version-specific installation URLs for all Nix
+  versions since 1.11.16, at
+  https://nixos.org/releases/nix/nix-VERSION/install.
+
-
+  These install scripts can be used in the same way as the main
+  NixOS.org installation script:
-
+  sh <(curl https://nixos.org/nix/install)
+
-
+  In the same directory as the install script are SHA-256 checksums and
+  GPG signature files.
+
+ +
+ Installing from a binary tarball
+  You can also download a binary tarball that contains Nix and all
+  its dependencies. (This is what the install script at
+  https://nixos.org/nix/install does automatically.) You
+  should unpack it somewhere (e.g. in /tmp),
+  and then run the script named install inside
+  the binary tarball:
+
+
+alice$ cd /tmp
+alice$ tar xfj nix-1.8-x86_64-darwin.tar.bz2
+alice$ cd nix-1.8-x86_64-darwin
+alice$ ./install
+
+
+
+  If you need to edit the multi-user installation script to use a
+  different group ID or a different user ID range, modify the
+  variables set in the file named
+  install-multi-user.
+
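As a rough sketch, the variables in question default to something like the following shell assignments; the exact names and values may differ between releases, so check your copy of install-multi-user before editing:

NIX_USER_COUNT="32"          # how many nixbld build users to create
NIX_BUILD_GROUP_ID="30000"   # GID of the nixbld group
NIX_FIRST_BUILD_UID="30001"  # UID assigned to the first build user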
diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml index 01e9688d635..e87d0de21ef 100644 --- a/doc/manual/installation/prerequisites-source.xml +++ b/doc/manual/installation/prerequisites-source.xml @@ -25,11 +25,18 @@ If your distribution does not provide it, you can get it from .
+ The libbrotlienc and + libbrotlidec libraries to provide implementation + of the Brotli compression algorithm. They are available for download + from the official repository . + The bzip2 compressor program and the libbz2 library. Thus you must have bzip2 installed, including development headers and libraries. If your distribution does not provide these, you can obtain bzip2 from . + xlink:href="https://web.archive.org/web/20180624184756/http://www.bzip.org/" + />. liblzma, which is provided by XZ Utils. If your distribution does not provide this, you can @@ -51,6 +58,10 @@ pass the flag to configure. + The boost library of version + 1.66.0 or higher. It can be obtained from the official web site + . + The xmllint and xsltproc programs to build this manual and the man-pages. These are part of the libxml2 and @@ -76,6 +87,15 @@ modify the parser or when you are building from the Git repository. + The libseccomp is used to provide + syscall filtering on Linux. This is an optional dependency and can + be disabled passing a + option to the configure script (Not recommended + unless your system doesn't support + libseccomp). To get the library, visit . + diff --git a/doc/manual/installation/supported-platforms.xml b/doc/manual/installation/supported-platforms.xml index 6858573ff40..3e74be49d1f 100644 --- a/doc/manual/installation/supported-platforms.xml +++ b/doc/manual/installation/supported-platforms.xml @@ -10,7 +10,7 @@ - Linux (i686, x86_64). + Linux (i686, x86_64, aarch64). macOS (x86_64). diff --git a/doc/manual/installation/upgrading.xml b/doc/manual/installation/upgrading.xml new file mode 100644 index 00000000000..30670d7fec9 --- /dev/null +++ b/doc/manual/installation/upgrading.xml @@ -0,0 +1,22 @@ + + + Upgrading Nix + + + Multi-user Nix users on macOS can upgrade Nix by running: + sudo -i sh -c 'nix-channel --update && + nix-env -iA nixpkgs.nix && + launchctl remove org.nixos.nix-daemon && + launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist' + + + + + Single-user installations of Nix should run this: + nix-channel --update; nix-env -iA nixpkgs.nix + + diff --git a/doc/manual/introduction/about-nix.xml b/doc/manual/introduction/about-nix.xml index e8c0a29753a..c21ed34ddc7 100644 --- a/doc/manual/introduction/about-nix.xml +++ b/doc/manual/introduction/about-nix.xml @@ -262,12 +262,6 @@ xlink:href="http://nixos.org/">NixOS homepage. xlink:href="http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html">GNU LGPLv2.1 or (at your option) any later version. -Nix uses the linenoise-ng -library, which has the following license: - - - diff --git a/doc/manual/manual.xml b/doc/manual/manual.xml index b408b681772..87d9de28ab1 100644 --- a/doc/manual/manual.xml +++ b/doc/manual/manual.xml @@ -32,6 +32,7 @@ + diff --git a/doc/manual/packages/s3-substituter.xml b/doc/manual/packages/s3-substituter.xml new file mode 100644 index 00000000000..2ec9687a0c6 --- /dev/null +++ b/doc/manual/packages/s3-substituter.xml @@ -0,0 +1,183 @@ + +
+
+Serving a Nix store via AWS S3 or S3-compatible Service
+
+Nix has built-in support for storing and fetching store paths
+from Amazon S3 and S3-compatible services. This uses the same
+binary cache mechanism that Nix usually uses to
+fetch prebuilt binaries from cache.nixos.org.
+
+The following options can be specified as URL parameters to
+the S3 URL:
+
+  profile
+
+    The name of the AWS configuration profile to use. By default
+    Nix will use the default profile.
+
+
+
+
+  region
+
+    The region of the S3 bucket. us-east-1 by
+    default.
+
+
+
+    If your bucket is not in us-east-1, you
+    should always explicitly specify the region parameter.
+
+
+
+
+  endpoint
+
+    The URL to your S3-compatible service, for when not using
+    Amazon S3. Do not specify this value if you're using Amazon
+    S3.
+
+    This endpoint must support HTTPS and will use
+    path-based addressing instead of virtual host based
+    addressing.
+
+
+
+  scheme
+
+    The scheme used for S3 requests, https
+    (default) or http. This option allows you to
+    disable HTTPS for binary caches which don't support it.
+
+    HTTPS should be used if the cache might contain
+    sensitive information.
+
+
+
+
+In this example we will use the bucket named
+example-nix-cache.
+
+ Anonymous Reads to your S3-compatible binary cache
+
+  If your binary cache is publicly accessible and does not
+  require authentication, the simplest and easiest way to use Nix with
+  your S3-compatible binary cache is to use the HTTP URL for that
+  cache.
+
+  For AWS S3 the binary cache URL for our example bucket will be
+  exactly https://example-nix-cache.s3.amazonaws.com or
+  s3://example-nix-cache. For S3-compatible binary caches,
+  consult that cache's documentation.
+
+  Your bucket will need the following bucket policy:
+
+
+ +
+ Authenticated Reads to your S3 binary cache
+
+  For AWS S3 the binary cache URL for our example bucket will be
+  exactly s3://example-nix-cache.
+
+  Nix will use the default
+  credential provider chain for authenticating requests to
+  Amazon S3.
+
+  Nix supports authenticated reads from Amazon S3 and S3-compatible
+  binary caches.
+
+  Your bucket will need a bucket policy allowing the desired
+  users to perform the s3:GetObject and
+  s3:GetBucketLocation actions on all objects in the
+  bucket. The anonymous policy in can be updated to
+  have a restricted Principal to support
+  this.
+ + +
+ Authenticated Writes to your S3-compatible binary cache
+
+  Nix fully supports writing to Amazon S3 and S3-compatible
+  buckets. The binary cache URL for our example bucket will
+  be s3://example-nix-cache.
+
+  Nix will use the default
+  credential provider chain for authenticating requests to
+  Amazon S3.
+
+  Your account will need the following IAM policy to
+  upload to the cache:
+
+
+
+
+  Uploading with a specific credential profile for Amazon S3
+  nix copy --to 's3://example-nix-cache?profile=cache-upload&region=eu-west-2' nixpkgs.hello
+
+
+  Uploading to an S3-Compatible Binary Cache
+  nix copy --to 's3://example-nix-cache?profile=cache-upload&scheme=https&endpoint=minio.example.com' nixpkgs.hello
+
+
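Once a cache has been populated this way, the same URL can be used on the consuming side; a minimal sketch, reusing the example bucket and region from above:

$ nix copy --from 's3://example-nix-cache?region=eu-west-2' nixpkgs.hello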
diff --git a/doc/manual/packages/sharing-packages.xml b/doc/manual/packages/sharing-packages.xml index 8465c182ee7..bb6c52b8f8c 100644 --- a/doc/manual/packages/sharing-packages.xml +++ b/doc/manual/packages/sharing-packages.xml @@ -15,5 +15,6 @@ packages between machines. + diff --git a/doc/manual/release-notes/release-notes.xml b/doc/manual/release-notes/release-notes.xml index b8392a647af..e8ff586fa43 100644 --- a/doc/manual/release-notes/release-notes.xml +++ b/doc/manual/release-notes/release-notes.xml @@ -12,6 +12,8 @@ --> + + diff --git a/doc/manual/release-notes/rl-2.1.xml b/doc/manual/release-notes/rl-2.1.xml new file mode 100644 index 00000000000..16c243fc191 --- /dev/null +++ b/doc/manual/release-notes/rl-2.1.xml @@ -0,0 +1,133 @@ +
+ +Release 2.1 (2018-09-02) + +This is primarily a bug fix release. It also reduces memory +consumption in certain situations. In addition, it has the following +new features: + + + + + The Nix installer will no longer default to the Multi-User + installation for macOS. You can still instruct the installer to + run in multi-user mode. + + + + + The Nix installer now supports performing a Multi-User + installation for Linux computers which are running systemd. You + can select a Multi-User installation by passing the + flag to the installer: sh <(curl + https://nixos.org/nix/install) --daemon. + + + The multi-user installer cannot handle systems with SELinux. + If your system has SELinux enabled, you can force the installer to run + in single-user mode. + + + + New builtin functions: + builtins.bitAnd, + builtins.bitOr, + builtins.bitXor, + builtins.fromTOML, + builtins.concatMap, + builtins.mapAttrs. + + + + + The S3 binary cache store now supports uploading NARs larger + than 5 GiB. + + + + The S3 binary cache store now supports uploading to + S3-compatible services with the endpoint + option. + + + + The flag is no longer required + to recover from disappeared NARs in binary caches. + + + + nix-daemon now respects + . + + + + nix run now respects + nix-support/propagated-user-env-packages. + + + + +This release has contributions from + +Adrien Devresse, +Aleksandr Pashkov, +Alexandre Esteves, +Amine Chikhaoui, +Andrew Dunham, +Asad Saeeduddin, +aszlig, +Ben Challenor, +Ben Gamari, +Benjamin Hipple, +Bogdan Seniuc, +Corey O'Connor, +Daiderd Jordan, +Daniel Peebles, +Daniel Poelzleithner, +Danylo Hlynskyi, +Dmitry Kalinkin, +Domen Kožar, +Doug Beardsley, +Eelco Dolstra, +Erik Arvstedt, +Félix Baylac-Jacqué, +Gleb Peregud, +Graham Christensen, +Guillaume Maudoux, +Ivan Kozik, +John Arnold, +Justin Humm, +Linus Heckemann, +Lorenzo Manacorda, +Matthew Justin Bauer, +Matthew O'Gorman, +Maximilian Bosch, +Michael Bishop, +Michael Fiano, +Michael Mercier, +Michael Raskin, +Michael Weiss, +Nicolas Dudebout, +Peter Simons, +Ryan Trinkle, +Samuel Dionne-Riel, +Sean Seefried, +Shea Levy, +Symphorien Gibol, +Tim Engler, +Tim Sears, +Tuomas Tynkkynen, +volth, +Will Dietz, +Yorick van Pelt and +zimbatm. + + +
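The new built-ins listed in these notes (bitAnd, bitOr, bitXor, fromTOML, concatMap, mapAttrs) are easiest to explore from the command line; a quick sketch, with the evaluation result shown after each command:

$ nix-instantiate --eval --expr 'builtins.bitAnd 5 3'
1
$ nix-instantiate --eval --expr 'builtins.bitXor 5 3'
6
$ nix-instantiate --eval --strict --expr 'builtins.fromTOML "x = 1"'
{ x = 1; }
$ nix-instantiate --eval --strict --expr 'builtins.concatMap (x: [ x x ]) [ 1 2 ]'
[ 1 1 2 2 ]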
diff --git a/doc/manual/release-notes/rl-2.2.xml b/doc/manual/release-notes/rl-2.2.xml new file mode 100644 index 00000000000..d29eb87e82c --- /dev/null +++ b/doc/manual/release-notes/rl-2.2.xml @@ -0,0 +1,143 @@ +
+ +Release 2.2 (2019-01-11) + +This is primarily a bug fix release. It also has the following +changes: + + + + + In derivations that use structured attributes (i.e. that + specify set the __structuredAttrs attribute to + true to cause all attributes to be passed to + the builder in JSON format), you can now specify closure checks + per output, e.g.: + + +outputChecks."out" = { + # The closure of 'out' must not be larger than 256 MiB. + maxClosureSize = 256 * 1024 * 1024; + + # It must not refer to C compiler or to the 'dev' output. + disallowedRequisites = [ stdenv.cc "dev" ]; +}; + +outputChecks."dev" = { + # The 'dev' output must not be larger than 128 KiB. + maxSize = 128 * 1024; +}; + + + + + + + + The derivation attribute + requiredSystemFeatures is now enforced for + local builds, and not just to route builds to remote builders. + The supported features of a machine can be specified through the + configuration setting system-features. + + By default, system-features includes + kvm if /dev/kvm + exists. For compatibility, it also includes the pseudo-features + nixos-test, benchmark and + big-parallel which are used by Nixpkgs to route + builds to particular Hydra build machines. + + + + + Sandbox builds are now enabled by default on Linux. + + + + The new command nix doctor shows + potential issues with your Nix installation. + + + + The fetchGit builtin function now uses a + caching scheme that puts different remote repositories in distinct + local repositories, rather than a single shared repository. This + may require more disk space but is faster. + + + + The dirOf builtin function now works on + relative paths. + + + + Nix now supports SRI hashes, + allowing the hash algorithm and hash to be specified in a single + string. For example, you can write: + + +import <nix/fetchurl.nix> { + url = https://nixos.org/releases/nix/nix-2.1.3/nix-2.1.3.tar.xz; + hash = "sha256-XSLa0FjVyADWWhFfkZ2iKTjFDda6mMXjoYMXLRSYQKQ="; +}; + + + instead of + + +import <nix/fetchurl.nix> { + url = https://nixos.org/releases/nix/nix-2.1.3/nix-2.1.3.tar.xz; + sha256 = "5d22dad058d5c800d65a115f919da22938c50dd6ba98c5e3a183172d149840a4"; +}; + + + + + In fixed-output derivations, the + outputHashAlgo attribute is no longer mandatory + if outputHash specifies the hash. + + nix hash-file and nix + hash-path now print hashes in SRI format by + default. They also use SHA-256 by default instead of SHA-512 + because that's what we use most of the time in Nixpkgs. + + + + Integers are now 64 bits on all platforms. + + + + The evaluator now prints profiling statistics (enabled via + the NIX_SHOW_STATS and + NIX_COUNT_CALLS environment variables) in JSON + format. + + + + The option in nix-store + --query has been removed. Instead, there now is an + option to output the dependency graph + in GraphML format. + + + + All nix-* commands are now symlinks to + nix. This saves a bit of disk space. + + + + nix repl now uses + libeditline or + libreadline. + + + + +
+ diff --git a/local.mk b/local.mk index 5d7e0fb2e42..4b380176f2e 100644 --- a/local.mk +++ b/local.mk @@ -6,7 +6,7 @@ dist-files += configure config.h.in nix.spec perl/configure clean-files += Makefile.config -GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr +GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr -I src/nix $(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index aa7633a709c..8432c95960c 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -76,15 +76,22 @@ sub downloadFile { write_file("$dstFile.sha256", $sha256_expected); + if (! -e "$dstFile.asc") { + system("gpg2 --detach-sign --armor $dstFile") == 0 or die "unable to sign $dstFile\n"; + } + return ($dstFile, $sha256_expected); } downloadFile("tarball", "2"); # .tar.bz2 my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz -my ($tarball_i686_linux, $tarball_i686_linux_hash) = downloadFile("binaryTarball.i686-linux", "1"); -my ($tarball_x86_64_linux, $tarball_x86_64_linux_hash) = downloadFile("binaryTarball.x86_64-linux", "1"); -my ($tarball_aarch64_linux, $tarball_aarch64_linux_hash) = downloadFile("binaryTarball.aarch64-linux", "1"); -my ($tarball_x86_64_darwin, $tarball_x86_64_darwin_hash) = downloadFile("binaryTarball.x86_64-darwin", "1"); +downloadFile("binaryTarball.i686-linux", "1"); +downloadFile("binaryTarball.x86_64-linux", "1"); +downloadFile("binaryTarball.aarch64-linux", "1"); +downloadFile("binaryTarball.x86_64-darwin", "1"); +downloadFile("installerScript", "1"); + +exit if $version =~ /pre/; # Update Nixpkgs in a very hacky way. system("cd $nixpkgsDir && git pull") == 0 or die; @@ -144,17 +151,6 @@ sub getStorePath { write_file("$siteDir/nix-release.tt", "[%-\n" . "latestNixVersion = \"$version\"\n" . - "nix_hash_i686_linux = \"$tarball_i686_linux_hash\"\n" . - "nix_hash_x86_64_linux = \"$tarball_x86_64_linux_hash\"\n" . - "nix_hash_aarch64_linux = \"$tarball_aarch64_linux_hash\"\n" . - "nix_hash_x86_64_darwin = \"$tarball_x86_64_darwin_hash\"\n" . "-%]\n"); -system("cd $siteDir && nix-shell --run 'make nix/install nix/install.sig'") == 0 or die; - -copy("$siteDir/nix/install", "$siteDir/nix/install-$version") or die; -copy("$siteDir/nix/install.sig", "$siteDir/nix/install-$version.sig") or die; - -system("cd $siteDir && git add nix/install-$version nix/install-$version.sig") == 0 or die; - system("cd $siteDir && git commit -a -m 'Nix $version released'") == 0 or die; diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile deleted file mode 100644 index 2f8e3dd7a67..00000000000 --- a/misc/docker/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM alpine - -# Enable HTTPS support in wget. -RUN apk add --update openssl - -# Download Nix and install it into the system. 
-RUN wget https://nixos.org/releases/nix/nix-2.0/nix-2.0-x86_64-linux.tar.bz2 \ - && echo "6312837aee33306cdbb351b75ba1638b89d21b30f0caf0346f9a742425f197ee nix-2.0-x86_64-linux.tar.bz2" | sha256sum -c \ - && tar xjf nix-*-x86_64-linux.tar.bz2 \ - && addgroup -g 30000 -S nixbld \ - && for i in $(seq 1 30); do adduser -S -D -h /var/empty -g "Nix build user $i" -u $((30000 + i)) -G nixbld nixbld$i ; done \ - && mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \ - && ln -s /nix/var/nix/profiles/default/etc/profile.d/nix.sh /etc/profile.d/ \ - && rm -r /nix-*-x86_64-linux \ - && rm -r /var/cache/apk/* - -ONBUILD ENV \ - ENV=/etc/profile \ - PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \ - GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \ - NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt - -ENV \ - ENV=/etc/profile \ - PATH=/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin \ - GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \ - NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt \ - NIX_PATH=/nix/var/nix/profiles/per-user/root/channels diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in index 549619a57d5..92ed12fa509 100644 --- a/misc/launchd/org.nixos.nix-daemon.plist.in +++ b/misc/launchd/org.nixos.nix-daemon.plist.in @@ -2,6 +2,11 @@ + EnvironmentVariables + + OBJC_DISABLE_INITIALIZE_FORK_SAFETY + YES + Label org.nixos.nix-daemon KeepAlive diff --git a/mk/libraries.mk b/mk/libraries.mk index 14c95fa91cf..3953446cba3 100644 --- a/mk/libraries.mk +++ b/mk/libraries.mk @@ -125,7 +125,7 @@ define build-library $(1)_PATH := $$(_d)/$$($(1)_NAME).a $$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/ - $(trace-ar) ar crs $$@ $$? + $(trace-ar) $(AR) crs $$@ $$? $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) diff --git a/nix.spec.in b/nix.spec.in index d962bcc857b..477768c6a68 100644 --- a/nix.spec.in +++ b/nix.spec.in @@ -3,33 +3,47 @@ %global nixbld_user "nix-builder-" %global nixbld_group "nixbld" +# NOTE: BUILD on EL7 requires +# - Centos / RHEL7 software collection repository +# yum install centos-release-scl +# +# - Recent boost backport +# curl https://copr.fedorainfracloud.org/coprs/whosthere/boost/repo/epel-7/whosthere-boost-epel-7.repo -o /etc/yum.repos.d/whosthere-boost-epel-7.repo +# + +# Disable documentation generation +# necessary on some platforms +%bcond_without docgen + Summary: The Nix software deployment system Name: nix Version: @PACKAGE_VERSION@ Release: 2%{?dist} License: LGPLv2+ -%if 0%{?rhel} && 0%{?rhel} < 7 Group: Applications/System -%endif URL: http://nixos.org/ Source0: %{name}-%{version}.tar.bz2 -%if 0%{?el5} -BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) -%endif + Requires: curl Requires: bzip2 Requires: gzip Requires: xz -Requires: libseccomp -Requires: boost-context +BuildRequires: bison +BuildRequires: boost-devel >= 1.60 BuildRequires: bzip2-devel -BuildRequires: sqlite-devel + +# for RHEL <= 7, we need software collections for a C++14 compatible compatible compiler +%if 0%{?rhel} +BuildRequires: devtoolset-7-gcc +BuildRequires: devtoolset-7-gcc-c++ +%endif + +BuildRequires: flex BuildRequires: libcurl-devel BuildRequires: libseccomp-devel -BuildRequires: boost-devel - -# Hack to make that shitty RPM scanning hack shut up. 
-Provides: perl(Nix::SSH) +BuildRequires: openssl-devel +BuildRequires: sqlite-devel +BuildRequires: xz-devel %description Nix is a purely functional package manager. It allows multiple @@ -41,9 +55,6 @@ it can be used equally well under other Unix systems. %package devel Summary: Development files for %{name} -%if 0%{?rhel} && 0%{?rhel} < 7 -Group: Development/Libraries -%endif Requires: %{name}%{?_isa} = %{version}-%{release} %description devel @@ -53,9 +64,6 @@ developing applications that use %{name}. %package doc Summary: Documentation files for %{name} -%if 0%{?rhel} && 0%{?rhel} < 7 -Group: Documentation -%endif BuildArch: noarch Requires: %{name} = %{version}-%{release} @@ -67,20 +75,25 @@ The %{name}-doc package contains documentation files for %{name}. %build +%if 0%{?rhel} +source /opt/rh/devtoolset-7/enable +%endif extraFlags= # - override docdir so large documentation files are owned by the # -doc subpackage # - set localstatedir by hand to the preferred nix value %configure --localstatedir=/nix/var \ + %{!?without_docgen:--disable-doc-gen} \ --docdir=%{_defaultdocdir}/%{name}-doc-%{version} \ $extraFlags -make -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES +make V=1 %{?_smp_mflags} %install -%if 0%{?el5} -rm -rf $RPM_BUILD_ROOT +%if 0%{?rhel} +source /opt/rh/devtoolset-7/enable %endif + make DESTDIR=$RPM_BUILD_ROOT install find $RPM_BUILD_ROOT -name '*.la' -exec rm -f {} ';' @@ -130,6 +143,7 @@ systemctl start nix-daemon.socket %endif %files +%license COPYING %{_bindir}/nix* %{_libdir}/*.so %{_prefix}/libexec/* @@ -138,9 +152,11 @@ systemctl start nix-daemon.socket %{_prefix}/lib/systemd/system/nix-daemon.service %endif %{_datadir}/nix -%{_mandir}/man1/*.1* -%{_mandir}/man5/*.5* -%{_mandir}/man8/*.8* +#%if ! %{without docgen} +#%{_mandir}/man1/*.1* +#%{_mandir}/man5/*.5* +#%{_mandir}/man8/*.8* +#%endif %config(noreplace) %{_sysconfdir}/profile.d/nix.sh %config(noreplace) %{_sysconfdir}/profile.d/nix-daemon.sh /nix @@ -149,6 +165,9 @@ systemctl start nix-daemon.socket %{_includedir}/nix %{_prefix}/lib/pkgconfig/*.pc -%files doc -%docdir %{_defaultdocdir}/%{name}-doc-%{version} -%{_defaultdocdir}/%{name}-doc-%{version} + +#%if ! 
%{without docgen} +#%files doc +#%docdir %{_defaultdocdir}/%{name}-doc-%{version} +#%{_defaultdocdir}/%{name}-doc-%{version} +#%endif diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index bbfb2934315..ce553bb53eb 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -27,7 +27,7 @@ static ref store() static std::shared_ptr _store; if (!_store) { try { - settings.loadConfFile(); + loadConfFile(); settings.lockCPU = false; _store = openStore(); } catch (Error & e) { diff --git a/release-common.nix b/release-common.nix index 0c12bc7ceb3..4c556598526 100644 --- a/release-common.nix +++ b/release-common.nix @@ -30,7 +30,7 @@ rec { }); configureFlags = - [ "--disable-init-state" + [ "--enable-gc" ] ++ lib.optionals stdenv.isLinux [ "--with-sandbox-shell=${sh}/bin/busybox" @@ -49,7 +49,7 @@ rec { buildDeps = [ curl - bzip2 xz brotli + bzip2 xz brotli editline openssl pkgconfig sqlite boehmgc boost @@ -57,13 +57,20 @@ rec { git mercurial ] - ++ lib.optional stdenv.isLinux libseccomp + ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) - (aws-sdk-cpp.override { - apis = ["s3"]; + ((aws-sdk-cpp.override { + apis = ["s3" "transfer"]; customMemoryManagement = false; - }); + }).overrideDerivation (args: { + /* + patches = args.patches or [] ++ [ (fetchpatch { + url = https://github.com/edolstra/aws-sdk-cpp/commit/3e07e1f1aae41b4c8b340735ff9e8c735f0c063f.patch; + sha256 = "1pij0v449p166f9l29x7ppzk8j7g9k9mp15ilh5qxp29c7fnvxy2"; + }) ]; + */ + })); perlDeps = [ perl diff --git a/release.nix b/release.nix index 37deb8e7ee3..5c0027301b3 100644 --- a/release.nix +++ b/release.nix @@ -1,5 +1,5 @@ { nix ? builtins.fetchGit ./. -, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.03"; } +, nixpkgs ? builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.09"; } , officialRelease ? false , systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] }: @@ -67,6 +67,14 @@ let buildInputs = buildDeps; + preConfigure = + # Copy libboost_context so we don't get all of Boost in our closure. 
+ # https://github.com/NixOS/nixpkgs/issues/45462 + '' + mkdir -p $out/lib + cp ${boost}/lib/libboost_context* $out/lib + ''; + configureFlags = configureFlags ++ [ "--sysconfdir=/etc" ]; @@ -74,8 +82,6 @@ let makeFlags = "profiledir=$(out)/etc/profile.d"; - preBuild = "unset NIX_INDENT_MAKE"; - installFlags = "sysconfdir=$(out)/etc"; doInstallCheck = true; @@ -103,8 +109,6 @@ let enableParallelBuilding = true; postUnpack = "sourceRoot=$sourceRoot/perl"; - - preBuild = "unset NIX_INDENT_MAKE"; }); @@ -189,10 +193,6 @@ let buildInputs = buildDeps; - configureFlags = '' - --disable-init-state - ''; - dontInstall = false; doInstallCheck = true; @@ -206,14 +206,14 @@ let }; - rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ]; + #rpm_fedora27x86_64 = makeRPM_x86_64 (diskImageFunsFun: diskImageFunsFun.fedora27x86_64) [ ]; #deb_debian8i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.debian8i386) [ "libsodium-dev" ] [ "libsodium13" ]; #deb_debian8x86_64 = makeDeb_x86_64 (diskImageFunsFun: diskImageFunsFun.debian8x86_64) [ "libsodium-dev" ] [ "libsodium13" ]; - deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ]; - deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ]; + #deb_ubuntu1710i386 = makeDeb_i686 (diskImageFuns: diskImageFuns.ubuntu1710i386) [ ] [ "libsodium18" ]; + #deb_ubuntu1710x86_64 = makeDeb_x86_64 (diskImageFuns: diskImageFuns.ubuntu1710x86_64) [ ] [ "libsodium18" "libboost-context1.62.0" ]; # System tests. @@ -241,6 +241,7 @@ let { diskImage = vmTools.diskImages.ubuntu1204x86_64; } '' + set -x useradd -m alice su - alice -c 'tar xf ${binaryTarball.x86_64-linux}/*.tar.*' mkdir /dest-nix @@ -249,6 +250,17 @@ let su - alice -c '_NIX_INSTALLER_TEST=1 ./nix-*/install' su - alice -c 'nix-store --verify' su - alice -c 'PAGER= nix-store -qR ${build.x86_64-linux}' + + # Check whether 'nix upgrade-nix' works. + cat > /tmp/paths.nix <> $out/nix-support/hydra-build-products + ''; + + # Aggregate job containing the release-critical jobs. release = pkgs.releaseTools.aggregate { name = "nix-${tarball.version}"; @@ -284,14 +313,17 @@ let build.i686-linux build.x86_64-darwin build.x86_64-linux + build.aarch64-linux binaryTarball.i686-linux binaryTarball.x86_64-darwin binaryTarball.x86_64-linux + binaryTarball.aarch64-linux tests.remoteBuilds tests.nix-copy-closure tests.binaryTarball tests.evalNixpkgs tests.evalNixOS + installerScript ]; }; @@ -311,7 +343,7 @@ let src = jobs.tarball; diskImage = (diskImageFun vmTools.diskImageFuns) { extraPackages = - [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" ] + [ "sqlite" "sqlite-devel" "bzip2-devel" "libcurl-devel" "openssl-devel" "xz-devel" "libseccomp-devel" "libsodium-devel" "boost-devel" "bison" "flex" ] ++ extraPackages; }; # At most 2047MB can be simulated in qemu-system-i386 memSize = 2047; diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 5f6542355e0..f93be9d3a45 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -674,9 +674,6 @@ $NIX_INSTALLED_NIX. 
EOF fi - _sudo "to initialize the Nix Database" \ - $NIX_INSTALLED_NIX/bin/nix-store --init - cat ./.reginfo \ | _sudo "to load data for the first time in to the Nix Database" \ "$NIX_INSTALLED_NIX/bin/nix-store" --load-db @@ -727,11 +724,17 @@ setup_default_profile() { _sudo "to installing a bootstrapping Nix in to the default Profile" \ HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX" - _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ - HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" + if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then + _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \ + HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT" + export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt + fi + # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call, + # otherwise it will be lost in environments where sudo doesn't pass + # all the environment variables by default. _sudo "to update the default channel in the default profile" \ - HOME="$ROOT_HOME" NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs + HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs } diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh index cd71d7947d7..fc633fa2337 100644 --- a/scripts/install-nix-from-closure.sh +++ b/scripts/install-nix-from-closure.sh @@ -30,15 +30,14 @@ if [ "$(uname -s)" = "Darwin" ]; then fi fi -# Determine if we should punt to the single-user installer or not +# Determine if we could use the multi-user installer or not if [ "$(uname -s)" = "Darwin" ]; then - INSTALL_MODE=daemon + echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then - INSTALL_MODE=daemon -else - INSTALL_MODE=no-daemon + echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2 fi +INSTALL_MODE=no-daemon # Trivially handle the --daemon / --no-daemon options if [ "x${1:-}" = "x--no-daemon" ]; then INSTALL_MODE=no-daemon @@ -47,14 +46,18 @@ elif [ "x${1:-}" = "x--daemon" ]; then elif [ "x${1:-}" != "x" ]; then ( echo "Nix Installer [--daemon|--no-daemon]" + + echo "Choose installation method." echo "" - echo " --daemon: Force the installer to use the Daemon" - echo " based installer, even though it may not" - echo " work." + echo " --daemon: Installs and configures a background daemon that manages the store," + echo " providing multi-user support and better isolation for local builds." + echo " Both for security and reproducibility, this method is recommended if" + echo " supported on your platform." + echo " See https://nixos.org/nix/manual/#sect-multi-user-installation" echo "" - echo " --no-daemon: Force a no-daemon, single-user" - echo " installation even when the preferred" - echo " method is with the daemon." + echo " --no-daemon: Simple, single-user installation that does not require root and is" + echo " trivial to uninstall." + echo " (default)" echo "" ) >&2 exit @@ -106,12 +109,6 @@ for i in $(cd "$self/store" >/dev/null && echo ./*); do done echo "" >&2 -echo "initialising Nix database..." >&2 -if ! 
$nix/bin/nix-store --init; then - echo "$0: failed to initialize the Nix database" >&2 - exit 1 -fi - if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then echo "$0: unable to register valid paths" >&2 exit 1 diff --git a/scripts/install.in b/scripts/install.in new file mode 100644 index 00000000000..7bff7b216d9 --- /dev/null +++ b/scripts/install.in @@ -0,0 +1,67 @@ +#!/bin/sh + +# This script installs the Nix package manager on your system by +# downloading a binary distribution and running its installer script +# (which in turn creates and populates /nix). + +{ # Prevent execution if this script was only partially downloaded +oops() { + echo "$0:" "$@" >&2 + exit 1 +} + +tmpDir="$(mktemp -d -t nix-binary-tarball-unpack.XXXXXXXXXX || \ + oops "Can't create temporary directory for downloading the Nix binary tarball")" +cleanup() { + rm -rf "$tmpDir" +} +trap cleanup EXIT INT QUIT TERM + +require_util() { + type "$1" > /dev/null 2>&1 || command -v "$1" > /dev/null 2>&1 || + oops "you do not have '$1' installed, which I need to $2" +} + +case "$(uname -s).$(uname -m)" in + Linux.x86_64) system=x86_64-linux; hash=@binaryTarball_x86_64-linux@;; + Linux.i?86) system=i686-linux; hash=@binaryTarball_i686-linux@;; + Linux.aarch64) system=aarch64-linux; hash=@binaryTarball_aarch64-linux@;; + Darwin.x86_64) system=x86_64-darwin; hash=@binaryTarball_x86_64-darwin@;; + *) oops "sorry, there is no binary distribution of Nix for your platform";; +esac + +url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.bz2" + +tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.bz2")" + +require_util curl "download the binary tarball" +require_util bzcat "decompress the binary tarball" +require_util tar "unpack the binary tarball" + +echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..." +curl -L "$url" -o "$tarball" || oops "failed to download '$url'" + +if type sha256sum > /dev/null 2>&1; then + hash2="$(sha256sum -b "$tarball" | cut -c1-64)" +elif type shasum > /dev/null 2>&1; then + hash2="$(shasum -a 256 -b "$tarball" | cut -c1-64)" +elif type openssl > /dev/null 2>&1; then + hash2="$(openssl dgst -r -sha256 "$tarball" | cut -c1-64)" +else + oops "cannot verify the SHA-256 hash of '$url'; you need one of 'shasum', 'sha256sum', or 'openssl'" +fi + +if [ "$hash" != "$hash2" ]; then + oops "SHA-256 hash mismatch in '$url'; expected $hash, got $hash2" +fi + +unpack=$tmpDir/unpack +mkdir -p "$unpack" +< "$tarball" bzcat | tar -xf - -C "$unpack" || oops "failed to unpack '$url'" + +script=$(echo "$unpack"/*/install) + +[ -e "$script" ] || oops "installation script is missing from the binary tarball!" +"$script" "$@" + +} # End of wrapping diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 43c7156062d..6940969cca7 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -1,5 +1,5 @@ # Only execute this file once per shell. 
-if [ -n "$__ETC_PROFILE_NIX_SOURCED" ]; then return; fi +if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi __ETC_PROFILE_NIX_SOURCED=1 # Set up secure multi-user builds: non-root users build through the @@ -49,6 +49,26 @@ if test -w $HOME; then fi fi -export NIX_SSL_CERT_FILE="@localstatedir@/nix/profiles/default/etc/ssl/certs/ca-bundle.crt" -export NIX_PATH="@localstatedir@/nix/profiles/per-user/root/channels" -export PATH="$HOME/.nix-profile/bin:$HOME/.nix-profile/lib/kde4/libexec:@localstatedir@/nix/profiles/default/bin:@localstatedir@/nix/profiles/default:@localstatedir@/nix/profiles/default/lib/kde4/libexec:$PATH" + +# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. +if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then + : # Allow users to override the NIX_SSL_CERT_FILE +elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch + export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt +elif [ -e /etc/ssl/ca-bundle.pem ]; then # openSUSE Tumbleweed + export NIX_SSL_CERT_FILE=/etc/ssl/ca-bundle.pem +elif [ -e /etc/ssl/certs/ca-bundle.crt ]; then # Old NixOS + export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt +elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS + export NIX_SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt +else + # Fall back to what is in the nix profiles, favouring whatever is defined last. + for i in $NIX_PROFILES; do + if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then + export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt + fi + done +fi + +export NIX_PATH="nixpkgs=@localstatedir@/nix/profiles/per-user/root/channels/nixpkgs:@localstatedir@/nix/profiles/per-user/root/channels" +export PATH="$HOME/.nix-profile/bin:@localstatedir@/nix/profiles/default/bin:$PATH" diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index a5f52274fc7..db03e16ba89 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -75,7 +75,7 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then export NIX_SSL_CERT_FILE="$NIX_LINK/etc/ca-bundle.crt" fi - if [ -n "${MANPATH}" ]; then + if [ -n "${MANPATH-}" ]; then export MANPATH="$NIX_LINK/share/man:$MANPATH" fi diff --git a/shell.nix b/shell.nix index c04bcd15130..817684b7646 100644 --- a/shell.nix +++ b/shell.nix @@ -1,6 +1,6 @@ { useClang ? 
false }: -with import (builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.03"; }) {}; +with import (builtins.fetchGit { url = https://github.com/NixOS/nixpkgs-channels.git; ref = "nixos-18.09"; }) {}; with import ./release-common.nix { inherit pkgs; }; diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 38dbe3e58b2..279ae62f69c 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -17,6 +17,7 @@ #include "store-api.hh" #include "derivations.hh" #include "local-store.hh" +#include "legacy.hh" using namespace nix; using std::cin; @@ -37,11 +38,15 @@ static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot) return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), true); } -int main (int argc, char * * argv) -{ - return handleExceptions(argv[0], [&]() { - initNix(); +static bool allSupportedLocally(const std::set& requiredFeatures) { + for (auto & feature : requiredFeatures) + if (!settings.systemFeatures.get().count(feature)) return false; + return true; +} +static int _main(int argc, char * * argv) +{ + { logger = makeJSONLogger(*logger); /* Ensure we don't get any SSH passphrase or host key popups. */ @@ -80,7 +85,7 @@ int main (int argc, char * * argv) if (machines.empty()) { std::cerr << "# decline-permanently\n"; - return; + return 0; } string drvPath; @@ -90,17 +95,18 @@ int main (int argc, char * * argv) try { auto s = readString(source); - if (s != "try") return; - } catch (EndOfFile &) { return; } + if (s != "try") return 0; + } catch (EndOfFile &) { return 0; } auto amWilling = readInt(source); auto neededSystem = readString(source); source >> drvPath; auto requiredFeatures = readStrings>(source); - auto canBuildLocally = amWilling - && ( neededSystem == settings.thisSystem - || settings.extraPlatforms.get().count(neededSystem) > 0); + auto canBuildLocally = amWilling + && ( neededSystem == settings.thisSystem + || settings.extraPlatforms.get().count(neededSystem) > 0) + && allSupportedLocally(requiredFeatures); /* Error ignored here, will be caught later */ mkdir(currentLoad.c_str(), 0777); @@ -253,6 +259,8 @@ int main (int argc, char * * argv) copyPaths(ref(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute); } - return; - }); + return 0; + } } + +static RegisterLegacyCommand s1("build-remote", _main); diff --git a/src/build-remote/local.mk b/src/build-remote/local.mk deleted file mode 100644 index 50b0409d188..00000000000 --- a/src/build-remote/local.mk +++ /dev/null @@ -1,9 +0,0 @@ -programs += build-remote - -build-remote_DIR := $(d) - -build-remote_INSTALL_DIR := $(libexecdir)/nix - -build-remote_LIBS = libmain libformat libstore libutil - -build-remote_SOURCES := $(d)/build-remote.cc diff --git a/src/cpptoml/LICENSE b/src/cpptoml/LICENSE new file mode 100644 index 00000000000..8802c4fa5a3 --- /dev/null +++ b/src/cpptoml/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2014 Chase Geigle + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/cpptoml/cpptoml.h b/src/cpptoml/cpptoml.h new file mode 100644 index 00000000000..07de010b152 --- /dev/null +++ b/src/cpptoml/cpptoml.h @@ -0,0 +1,3457 @@ +/** + * @file cpptoml.h + * @author Chase Geigle + * @date May 2013 + */ + +#ifndef _CPPTOML_H_ +#define _CPPTOML_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __cplusplus > 201103L +#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]] +#elif defined(__clang__) +#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated(reason))) +#elif defined(__GNUG__) +#define CPPTOML_DEPRECATED(reason) __attribute__((deprecated)) +#elif defined(_MSC_VER) +#if _MSC_VER < 1910 +#define CPPTOML_DEPRECATED(reason) __declspec(deprecated) +#else +#define CPPTOML_DEPRECATED(reason) [[deprecated(reason)]] +#endif +#endif + +namespace cpptoml +{ +class writer; // forward declaration +class base; // forward declaration +#if defined(CPPTOML_USE_MAP) +// a std::map will ensure that entries a sorted, albeit at a slight +// performance penalty relative to the (default) unordered_map +using string_to_base_map = std::map>; +#else +// by default an unordered_map is used for best performance as the +// toml specification does not require entries to be sorted +using string_to_base_map + = std::unordered_map>; +#endif + +// if defined, `base` will retain type information in form of an enum class +// such that static_cast can be used instead of dynamic_cast +// #define CPPTOML_NO_RTTI + +template +class option +{ + public: + option() : empty_{true} + { + // nothing + } + + option(T value) : empty_{false}, value_(std::move(value)) + { + // nothing + } + + explicit operator bool() const + { + return !empty_; + } + + const T& operator*() const + { + return value_; + } + + const T* operator->() const + { + return &value_; + } + + const T& value_or(const T& alternative) const + { + if (!empty_) + return value_; + return alternative; + } + + private: + bool empty_; + T value_; +}; + +struct local_date +{ + int year = 0; + int month = 0; + int day = 0; +}; + +struct local_time +{ + int hour = 0; + int minute = 0; + int second = 0; + int microsecond = 0; +}; + +struct zone_offset +{ + int hour_offset = 0; + int minute_offset = 0; +}; + +struct local_datetime : local_date, local_time +{ +}; + +struct offset_datetime : local_datetime, zone_offset +{ + static inline struct offset_datetime from_zoned(const struct tm& t) + { + offset_datetime dt; + dt.year = t.tm_year + 1900; + dt.month = t.tm_mon + 1; + dt.day = t.tm_mday; + dt.hour = t.tm_hour; + dt.minute = t.tm_min; + dt.second = t.tm_sec; + + char buf[16]; + strftime(buf, 16, "%z", &t); + + int offset = std::stoi(buf); + dt.hour_offset = offset / 100; + dt.minute_offset = offset % 100; + return dt; + } + + CPPTOML_DEPRECATED("from_local has been renamed to from_zoned") + static inline struct offset_datetime from_local(const struct tm& t) + { + return from_zoned(t); + } + + static inline struct offset_datetime 
from_utc(const struct tm& t) + { + offset_datetime dt; + dt.year = t.tm_year + 1900; + dt.month = t.tm_mon + 1; + dt.day = t.tm_mday; + dt.hour = t.tm_hour; + dt.minute = t.tm_min; + dt.second = t.tm_sec; + return dt; + } +}; + +CPPTOML_DEPRECATED("datetime has been renamed to offset_datetime") +typedef offset_datetime datetime; + +class fill_guard +{ + public: + fill_guard(std::ostream& os) : os_(os), fill_{os.fill()} + { + // nothing + } + + ~fill_guard() + { + os_.fill(fill_); + } + + private: + std::ostream& os_; + std::ostream::char_type fill_; +}; + +inline std::ostream& operator<<(std::ostream& os, const local_date& dt) +{ + fill_guard g{os}; + os.fill('0'); + + using std::setw; + os << setw(4) << dt.year << "-" << setw(2) << dt.month << "-" << setw(2) + << dt.day; + + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const local_time& ltime) +{ + fill_guard g{os}; + os.fill('0'); + + using std::setw; + os << setw(2) << ltime.hour << ":" << setw(2) << ltime.minute << ":" + << setw(2) << ltime.second; + + if (ltime.microsecond > 0) + { + os << "."; + int power = 100000; + for (int curr_us = ltime.microsecond; curr_us; power /= 10) + { + auto num = curr_us / power; + os << num; + curr_us -= num * power; + } + } + + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const zone_offset& zo) +{ + fill_guard g{os}; + os.fill('0'); + + using std::setw; + + if (zo.hour_offset != 0 || zo.minute_offset != 0) + { + if (zo.hour_offset > 0) + { + os << "+"; + } + else + { + os << "-"; + } + os << setw(2) << std::abs(zo.hour_offset) << ":" << setw(2) + << std::abs(zo.minute_offset); + } + else + { + os << "Z"; + } + + return os; +} + +inline std::ostream& operator<<(std::ostream& os, const local_datetime& dt) +{ + return os << static_cast(dt) << "T" + << static_cast(dt); +} + +inline std::ostream& operator<<(std::ostream& os, const offset_datetime& dt) +{ + return os << static_cast(dt) + << static_cast(dt); +} + +template +struct is_one_of; + +template +struct is_one_of : std::is_same +{ +}; + +template +struct is_one_of +{ + const static bool value + = std::is_same::value || is_one_of::value; +}; + +template +class value; + +template +struct valid_value + : is_one_of +{ +}; + +template +struct value_traits; + +template +struct valid_value_or_string_convertible +{ + + const static bool value = valid_value::type>::value + || std::is_convertible::value; +}; + +template +struct value_traits:: + value>::type> +{ + using value_type = typename std:: + conditional::type>::value, + typename std::decay::type, std::string>::type; + + using type = value; + + static value_type construct(T&& val) + { + return value_type(val); + } +}; + +template +struct value_traits::value + && std::is_floating_point< + typename std::decay::type>::value>:: + type> +{ + using value_type = typename std::decay::type; + + using type = value; + + static value_type construct(T&& val) + { + return value_type(val); + } +}; + +template +struct value_traits::value + && std::is_signed:: + type>::value>::type> +{ + using value_type = int64_t; + + using type = value; + + static value_type construct(T&& val) + { + if (val < (std::numeric_limits::min)()) + throw std::underflow_error{"constructed value cannot be " + "represented by a 64-bit signed " + "integer"}; + + if (val > (std::numeric_limits::max)()) + throw std::overflow_error{"constructed value cannot be represented " + "by a 64-bit signed integer"}; + + return static_cast(val); + } +}; + +template +struct value_traits::value + && std::is_unsigned:: + 
type>::value>::type> +{ + using value_type = int64_t; + + using type = value; + + static value_type construct(T&& val) + { + if (val > static_cast((std::numeric_limits::max)())) + throw std::overflow_error{"constructed value cannot be represented " + "by a 64-bit signed integer"}; + + return static_cast(val); + } +}; + +class array; +class table; +class table_array; + +template +struct array_of_trait +{ + using return_type = option>; +}; + +template <> +struct array_of_trait +{ + using return_type = option>>; +}; + +template +inline std::shared_ptr::type> make_value(T&& val); +inline std::shared_ptr make_array(); +template +inline std::shared_ptr make_element(); +inline std::shared_ptr make_table(); +inline std::shared_ptr make_table_array(); + +#if defined(CPPTOML_NO_RTTI) +/// Base type used to store underlying data type explicitly if RTTI is disabled +enum class base_type +{ + NONE, + STRING, + LOCAL_TIME, + LOCAL_DATE, + LOCAL_DATETIME, + OFFSET_DATETIME, + INT, + FLOAT, + BOOL, + TABLE, + ARRAY, + TABLE_ARRAY +}; + +/// Type traits class to convert C++ types to enum member +template +struct base_type_traits; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::STRING; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::LOCAL_TIME; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::LOCAL_DATE; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::LOCAL_DATETIME; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::OFFSET_DATETIME; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::INT; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::FLOAT; +}; + +template <> +struct base_type_traits +{ + static const base_type type = base_type::BOOL; +}; + +template <> +struct base_type_traits
<table>
+{
+    static const base_type type = base_type::TABLE;
+};
+
+template <>
+struct base_type_traits<array>
+{
+    static const base_type type = base_type::ARRAY;
+};
+
+template <>
+struct base_type_traits<table_array>
+{
+    static const base_type type = base_type::TABLE_ARRAY;
+};
+#endif
+
+/**
+ * A generic base TOML value used for type erasure.
+ */
+class base : public std::enable_shared_from_this<base>
+{
+  public:
+    virtual ~base() = default;
+
+    virtual std::shared_ptr<base> clone() const = 0;
+
+    /**
+     * Determines if the given TOML element is a value.
+     */
+    virtual bool is_value() const
+    {
+        return false;
+    }
+
+    /**
+     * Determines if the given TOML element is a table.
+     */
+    virtual bool is_table() const
+    {
+        return false;
+    }
+
+    /**
+     * Converts the TOML element into a table.
+     */
+    std::shared_ptr<table> as_table()
+    {
+        if (is_table())
+            return std::static_pointer_cast<table>
(shared_from_this()); + return nullptr; + } + /** + * Determines if the TOML element is an array of "leaf" elements. + */ + virtual bool is_array() const + { + return false; + } + + /** + * Converts the TOML element to an array. + */ + std::shared_ptr as_array() + { + if (is_array()) + return std::static_pointer_cast(shared_from_this()); + return nullptr; + } + + /** + * Determines if the given TOML element is an array of tables. + */ + virtual bool is_table_array() const + { + return false; + } + + /** + * Converts the TOML element into a table array. + */ + std::shared_ptr as_table_array() + { + if (is_table_array()) + return std::static_pointer_cast(shared_from_this()); + return nullptr; + } + + /** + * Attempts to coerce the TOML element into a concrete TOML value + * of type T. + */ + template + std::shared_ptr> as(); + + template + std::shared_ptr> as() const; + + template + void accept(Visitor&& visitor, Args&&... args) const; + +#if defined(CPPTOML_NO_RTTI) + base_type type() const + { + return type_; + } + + protected: + base(const base_type t) : type_(t) + { + // nothing + } + + private: + const base_type type_ = base_type::NONE; + +#else + protected: + base() + { + // nothing + } +#endif +}; + +/** + * A concrete TOML value representing the "leaves" of the "tree". + */ +template +class value : public base +{ + struct make_shared_enabler + { + // nothing; this is a private key accessible only to friends + }; + + template + friend std::shared_ptr::type> + cpptoml::make_value(U&& val); + + public: + static_assert(valid_value::value, "invalid value type"); + + std::shared_ptr clone() const override; + + value(const make_shared_enabler&, const T& val) : value(val) + { + // nothing; note that users cannot actually invoke this function + // because they lack access to the make_shared_enabler. + } + + bool is_value() const override + { + return true; + } + + /** + * Gets the data associated with this value. + */ + T& get() + { + return data_; + } + + /** + * Gets the data associated with this value. Const version. + */ + const T& get() const + { + return data_; + } + + private: + T data_; + + /** + * Constructs a value from the given data. 
+ */ +#if defined(CPPTOML_NO_RTTI) + value(const T& val) : base(base_type_traits::type), data_(val) + { + } +#else + value(const T& val) : data_(val) + { + } +#endif + + value(const value& val) = delete; + value& operator=(const value& val) = delete; +}; + +template +std::shared_ptr::type> make_value(T&& val) +{ + using value_type = typename value_traits::type; + using enabler = typename value_type::make_shared_enabler; + return std::make_shared( + enabler{}, value_traits::construct(std::forward(val))); +} + +template +inline std::shared_ptr> base::as() +{ +#if defined(CPPTOML_NO_RTTI) + if (type() == base_type_traits::type) + return std::static_pointer_cast>(shared_from_this()); + else + return nullptr; +#else + return std::dynamic_pointer_cast>(shared_from_this()); +#endif +} + +// special case value to allow getting an integer parameter as a +// double value +template <> +inline std::shared_ptr> base::as() +{ +#if defined(CPPTOML_NO_RTTI) + if (type() == base_type::FLOAT) + return std::static_pointer_cast>(shared_from_this()); + + if (type() == base_type::INT) + { + auto v = std::static_pointer_cast>(shared_from_this()); + return make_value(static_cast(v->get()));; + } +#else + if (auto v = std::dynamic_pointer_cast>(shared_from_this())) + return v; + + if (auto v = std::dynamic_pointer_cast>(shared_from_this())) + return make_value(static_cast(v->get())); +#endif + + return nullptr; +} + +template +inline std::shared_ptr> base::as() const +{ +#if defined(CPPTOML_NO_RTTI) + if (type() == base_type_traits::type) + return std::static_pointer_cast>(shared_from_this()); + else + return nullptr; +#else + return std::dynamic_pointer_cast>(shared_from_this()); +#endif +} + +// special case value to allow getting an integer parameter as a +// double value +template <> +inline std::shared_ptr> base::as() const +{ +#if defined(CPPTOML_NO_RTTI) + if (type() == base_type::FLOAT) + return std::static_pointer_cast>(shared_from_this()); + + if (type() == base_type::INT) + { + auto v = as(); + // the below has to be a non-const value due to a bug in + // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843 + return make_value(static_cast(v->get())); + } +#else + if (auto v + = std::dynamic_pointer_cast>(shared_from_this())) + return v; + + if (auto v = as()) + { + // the below has to be a non-const value due to a bug in + // libc++: https://llvm.org/bugs/show_bug.cgi?id=18843 + return make_value(static_cast(v->get())); + } +#endif + + return nullptr; +} + +/** + * Exception class for array insertion errors. + */ +class array_exception : public std::runtime_error +{ + public: + array_exception(const std::string& err) : std::runtime_error{err} + { + } +}; + +class array : public base +{ + public: + friend std::shared_ptr make_array(); + + std::shared_ptr clone() const override; + + virtual bool is_array() const override + { + return true; + } + + using size_type = std::size_t; + + /** + * arrays can be iterated over + */ + using iterator = std::vector>::iterator; + + /** + * arrays can be iterated over. Const version. + */ + using const_iterator = std::vector>::const_iterator; + + iterator begin() + { + return values_.begin(); + } + + const_iterator begin() const + { + return values_.begin(); + } + + iterator end() + { + return values_.end(); + } + + const_iterator end() const + { + return values_.end(); + } + + /** + * Obtains the array (vector) of base values. + */ + std::vector>& get() + { + return values_; + } + + /** + * Obtains the array (vector) of base values. Const version. 
+ */ + const std::vector>& get() const + { + return values_; + } + + std::shared_ptr at(size_t idx) const + { + return values_.at(idx); + } + + /** + * Obtains an array of values. Note that elements may be + * nullptr if they cannot be converted to a value. + */ + template + std::vector>> array_of() const + { + std::vector>> result(values_.size()); + + std::transform(values_.begin(), values_.end(), result.begin(), + [&](std::shared_ptr v) { return v->as(); }); + + return result; + } + + /** + * Obtains a option>. The option will be empty if the array + * contains values that are not of type T. + */ + template + inline typename array_of_trait::return_type get_array_of() const + { + std::vector result; + result.reserve(values_.size()); + + for (const auto& val : values_) + { + if (auto v = val->as()) + result.push_back(v->get()); + else + return {}; + } + + return {std::move(result)}; + } + + /** + * Obtains an array of arrays. Note that elements may be nullptr + * if they cannot be converted to a array. + */ + std::vector> nested_array() const + { + std::vector> result(values_.size()); + + std::transform(values_.begin(), values_.end(), result.begin(), + [&](std::shared_ptr v) -> std::shared_ptr { + if (v->is_array()) + return std::static_pointer_cast(v); + return std::shared_ptr{}; + }); + + return result; + } + + /** + * Add a value to the end of the array + */ + template + void push_back(const std::shared_ptr>& val) + { + if (values_.empty() || values_[0]->as()) + { + values_.push_back(val); + } + else + { + throw array_exception{"Arrays must be homogenous."}; + } + } + + /** + * Add an array to the end of the array + */ + void push_back(const std::shared_ptr& val) + { + if (values_.empty() || values_[0]->is_array()) + { + values_.push_back(val); + } + else + { + throw array_exception{"Arrays must be homogenous."}; + } + } + + /** + * Convenience function for adding a simple element to the end + * of the array. + */ + template + void push_back(T&& val, typename value_traits::type* = 0) + { + push_back(make_value(std::forward(val))); + } + + /** + * Insert a value into the array + */ + template + iterator insert(iterator position, const std::shared_ptr>& value) + { + if (values_.empty() || values_[0]->as()) + { + return values_.insert(position, value); + } + else + { + throw array_exception{"Arrays must be homogenous."}; + } + } + + /** + * Insert an array into the array + */ + iterator insert(iterator position, const std::shared_ptr& value) + { + if (values_.empty() || values_[0]->is_array()) + { + return values_.insert(position, value); + } + else + { + throw array_exception{"Arrays must be homogenous."}; + } + } + + /** + * Convenience function for inserting a simple element in the array + */ + template + iterator insert(iterator position, T&& val, + typename value_traits::type* = 0) + { + return insert(position, make_value(std::forward(val))); + } + + /** + * Erase an element from the array + */ + iterator erase(iterator position) + { + return values_.erase(position); + } + + /** + * Clear the array + */ + void clear() + { + values_.clear(); + } + + /** + * Reserve space for n values. 
+ */ + void reserve(size_type n) + { + values_.reserve(n); + } + + private: +#if defined(CPPTOML_NO_RTTI) + array() : base(base_type::ARRAY) + { + // empty + } +#else + array() = default; +#endif + + template + array(InputIterator begin, InputIterator end) : values_{begin, end} + { + // nothing + } + + array(const array& obj) = delete; + array& operator=(const array& obj) = delete; + + std::vector> values_; +}; + +inline std::shared_ptr make_array() +{ + struct make_shared_enabler : public array + { + make_shared_enabler() + { + // nothing + } + }; + + return std::make_shared(); +} + +template <> +inline std::shared_ptr make_element() +{ + return make_array(); +} + +/** + * Obtains a option>. The option will be empty if the array + * contains values that are not of type T. + */ +template <> +inline typename array_of_trait::return_type +array::get_array_of() const +{ + std::vector> result; + result.reserve(values_.size()); + + for (const auto& val : values_) + { + if (auto v = val->as_array()) + result.push_back(v); + else + return {}; + } + + return {std::move(result)}; +} + +class table; + +class table_array : public base +{ + friend class table; + friend std::shared_ptr make_table_array(); + + public: + std::shared_ptr clone() const override; + + using size_type = std::size_t; + + /** + * arrays can be iterated over + */ + using iterator = std::vector>::iterator; + + /** + * arrays can be iterated over. Const version. + */ + using const_iterator = std::vector>::const_iterator; + + iterator begin() + { + return array_.begin(); + } + + const_iterator begin() const + { + return array_.begin(); + } + + iterator end() + { + return array_.end(); + } + + const_iterator end() const + { + return array_.end(); + } + + virtual bool is_table_array() const override + { + return true; + } + + std::vector>& get() + { + return array_; + } + + const std::vector>& get() const + { + return array_; + } + + /** + * Add a table to the end of the array + */ + void push_back(const std::shared_ptr
<table>& val)
+    {
+        array_.push_back(val);
+    }
+
+    /**
+     * Insert a table into the array
+     */
+    iterator insert(iterator position, const std::shared_ptr<table>
& value) + { + return array_.insert(position, value); + } + + /** + * Erase an element from the array + */ + iterator erase(iterator position) + { + return array_.erase(position); + } + + /** + * Clear the array + */ + void clear() + { + array_.clear(); + } + + /** + * Reserve space for n tables. + */ + void reserve(size_type n) + { + array_.reserve(n); + } + + private: +#if defined(CPPTOML_NO_RTTI) + table_array() : base(base_type::TABLE_ARRAY) + { + // nothing + } +#else + table_array() + { + // nothing + } +#endif + + table_array(const table_array& obj) = delete; + table_array& operator=(const table_array& rhs) = delete; + + std::vector> array_; +}; + +inline std::shared_ptr make_table_array() +{ + struct make_shared_enabler : public table_array + { + make_shared_enabler() + { + // nothing + } + }; + + return std::make_shared(); +} + +template <> +inline std::shared_ptr make_element() +{ + return make_table_array(); +} + +// The below are overloads for fetching specific value types out of a value +// where special casting behavior (like bounds checking) is desired + +template +typename std::enable_if::value + && std::is_signed::value, + option>::type +get_impl(const std::shared_ptr& elem) +{ + if (auto v = elem->as()) + { + if (v->get() < (std::numeric_limits::min)()) + throw std::underflow_error{ + "T cannot represent the value requested in get"}; + + if (v->get() > (std::numeric_limits::max)()) + throw std::overflow_error{ + "T cannot represent the value requested in get"}; + + return {static_cast(v->get())}; + } + else + { + return {}; + } +} + +template +typename std::enable_if::value + && std::is_unsigned::value, + option>::type +get_impl(const std::shared_ptr& elem) +{ + if (auto v = elem->as()) + { + if (v->get() < 0) + throw std::underflow_error{"T cannot store negative value in get"}; + + if (static_cast(v->get()) > (std::numeric_limits::max)()) + throw std::overflow_error{ + "T cannot represent the value requested in get"}; + + return {static_cast(v->get())}; + } + else + { + return {}; + } +} + +template +typename std::enable_if::value + || std::is_same::value, + option>::type +get_impl(const std::shared_ptr& elem) +{ + if (auto v = elem->as()) + { + return {v->get()}; + } + else + { + return {}; + } +} + +/** + * Represents a TOML keytable. + */ +class table : public base +{ + public: + friend class table_array; + friend std::shared_ptr
make_table(); + + std::shared_ptr clone() const override; + + /** + * tables can be iterated over. + */ + using iterator = string_to_base_map::iterator; + + /** + * tables can be iterated over. Const version. + */ + using const_iterator = string_to_base_map::const_iterator; + + iterator begin() + { + return map_.begin(); + } + + const_iterator begin() const + { + return map_.begin(); + } + + iterator end() + { + return map_.end(); + } + + const_iterator end() const + { + return map_.end(); + } + + bool is_table() const override + { + return true; + } + + bool empty() const + { + return map_.empty(); + } + + /** + * Determines if this key table contains the given key. + */ + bool contains(const std::string& key) const + { + return map_.find(key) != map_.end(); + } + + /** + * Determines if this key table contains the given key. Will + * resolve "qualified keys". Qualified keys are the full access + * path separated with dots like "grandparent.parent.child". + */ + bool contains_qualified(const std::string& key) const + { + return resolve_qualified(key); + } + + /** + * Obtains the base for a given key. + * @throw std::out_of_range if the key does not exist + */ + std::shared_ptr get(const std::string& key) const + { + return map_.at(key); + } + + /** + * Obtains the base for a given key. Will resolve "qualified + * keys". Qualified keys are the full access path separated with + * dots like "grandparent.parent.child". + * + * @throw std::out_of_range if the key does not exist + */ + std::shared_ptr get_qualified(const std::string& key) const + { + std::shared_ptr p; + resolve_qualified(key, &p); + return p; + } + + /** + * Obtains a table for a given key, if possible. + */ + std::shared_ptr
<table> get_table(const std::string& key) const
+    {
+        if (contains(key) && get(key)->is_table())
+            return std::static_pointer_cast<table>(get(key));
+        return nullptr;
+    }
+
+    /**
+     * Obtains a table for a given key, if possible. Will resolve
+     * "qualified keys".
+     */
+    std::shared_ptr<table> get_table_qualified(const std::string& key) const
+    {
+        if (contains_qualified(key) && get_qualified(key)->is_table())
+            return std::static_pointer_cast<table>
(get_qualified(key)); + return nullptr; + } + + /** + * Obtains an array for a given key. + */ + std::shared_ptr get_array(const std::string& key) const + { + if (!contains(key)) + return nullptr; + return get(key)->as_array(); + } + + /** + * Obtains an array for a given key. Will resolve "qualified keys". + */ + std::shared_ptr get_array_qualified(const std::string& key) const + { + if (!contains_qualified(key)) + return nullptr; + return get_qualified(key)->as_array(); + } + + /** + * Obtains a table_array for a given key, if possible. + */ + std::shared_ptr get_table_array(const std::string& key) const + { + if (!contains(key)) + return nullptr; + return get(key)->as_table_array(); + } + + /** + * Obtains a table_array for a given key, if possible. Will resolve + * "qualified keys". + */ + std::shared_ptr + get_table_array_qualified(const std::string& key) const + { + if (!contains_qualified(key)) + return nullptr; + return get_qualified(key)->as_table_array(); + } + + /** + * Helper function that attempts to get a value corresponding + * to the template parameter from a given key. + */ + template + option get_as(const std::string& key) const + { + try + { + return get_impl(get(key)); + } + catch (const std::out_of_range&) + { + return {}; + } + } + + /** + * Helper function that attempts to get a value corresponding + * to the template parameter from a given key. Will resolve "qualified + * keys". + */ + template + option get_qualified_as(const std::string& key) const + { + try + { + return get_impl(get_qualified(key)); + } + catch (const std::out_of_range&) + { + return {}; + } + } + + /** + * Helper function that attempts to get an array of values of a given + * type corresponding to the template parameter for a given key. + * + * If the key doesn't exist, doesn't exist as an array type, or one or + * more keys inside the array type are not of type T, an empty option + * is returned. Otherwise, an option containing a vector of the values + * is returned. + */ + template + inline typename array_of_trait::return_type + get_array_of(const std::string& key) const + { + if (auto v = get_array(key)) + { + std::vector result; + result.reserve(v->get().size()); + + for (const auto& b : v->get()) + { + if (auto val = b->as()) + result.push_back(val->get()); + else + return {}; + } + return {std::move(result)}; + } + + return {}; + } + + /** + * Helper function that attempts to get an array of values of a given + * type corresponding to the template parameter for a given key. Will + * resolve "qualified keys". + * + * If the key doesn't exist, doesn't exist as an array type, or one or + * more keys inside the array type are not of type T, an empty option + * is returned. Otherwise, an option containing a vector of the values + * is returned. + */ + template + inline typename array_of_trait::return_type + get_qualified_array_of(const std::string& key) const + { + if (auto v = get_array_qualified(key)) + { + std::vector result; + result.reserve(v->get().size()); + + for (const auto& b : v->get()) + { + if (auto val = b->as()) + result.push_back(val->get()); + else + return {}; + } + return {std::move(result)}; + } + + return {}; + } + + /** + * Adds an element to the keytable. + */ + void insert(const std::string& key, const std::shared_ptr& value) + { + map_[key] = value; + } + + /** + * Convenience shorthand for adding a simple element to the + * keytable. 
+ */ + template + void insert(const std::string& key, T&& val, + typename value_traits::type* = 0) + { + insert(key, make_value(std::forward(val))); + } + + /** + * Removes an element from the table. + */ + void erase(const std::string& key) + { + map_.erase(key); + } + + private: +#if defined(CPPTOML_NO_RTTI) + table() : base(base_type::TABLE) + { + // nothing + } +#else + table() + { + // nothing + } +#endif + + table(const table& obj) = delete; + table& operator=(const table& rhs) = delete; + + std::vector split(const std::string& value, + char separator) const + { + std::vector result; + std::string::size_type p = 0; + std::string::size_type q; + while ((q = value.find(separator, p)) != std::string::npos) + { + result.emplace_back(value, p, q - p); + p = q + 1; + } + result.emplace_back(value, p); + return result; + } + + // If output parameter p is specified, fill it with the pointer to the + // specified entry and throw std::out_of_range if it couldn't be found. + // + // Otherwise, just return true if the entry could be found or false + // otherwise and do not throw. + bool resolve_qualified(const std::string& key, + std::shared_ptr* p = nullptr) const + { + auto parts = split(key, '.'); + auto last_key = parts.back(); + parts.pop_back(); + + auto cur_table = this; + for (const auto& part : parts) + { + cur_table = cur_table->get_table(part).get(); + if (!cur_table) + { + if (!p) + return false; + + throw std::out_of_range{key + " is not a valid key"}; + } + } + + if (!p) + return cur_table->map_.count(last_key) != 0; + + *p = cur_table->map_.at(last_key); + return true; + } + + string_to_base_map map_; +}; + +/** + * Helper function that attempts to get an array of arrays for a given + * key. + * + * If the key doesn't exist, doesn't exist as an array type, or one or + * more keys inside the array type are not of type T, an empty option + * is returned. Otherwise, an option containing a vector of the values + * is returned. + */ +template <> +inline typename array_of_trait::return_type +table::get_array_of(const std::string& key) const +{ + if (auto v = get_array(key)) + { + std::vector> result; + result.reserve(v->get().size()); + + for (const auto& b : v->get()) + { + if (auto val = b->as_array()) + result.push_back(val); + else + return {}; + } + + return {std::move(result)}; + } + + return {}; +} + +/** + * Helper function that attempts to get an array of arrays for a given + * key. Will resolve "qualified keys". + * + * If the key doesn't exist, doesn't exist as an array type, or one or + * more keys inside the array type are not of type T, an empty option + * is returned. Otherwise, an option containing a vector of the values + * is returned. + */ +template <> +inline typename array_of_trait::return_type +table::get_qualified_array_of(const std::string& key) const +{ + if (auto v = get_array_qualified(key)) + { + std::vector> result; + result.reserve(v->get().size()); + + for (const auto& b : v->get()) + { + if (auto val = b->as_array()) + result.push_back(val); + else + return {}; + } + + return {std::move(result)}; + } + + return {}; +} + +std::shared_ptr
<table> make_table()
+{
+    struct make_shared_enabler : public table
+    {
+        make_shared_enabler()
+        {
+            // nothing
+        }
+    };
+
+    return std::make_shared<make_shared_enabler>();
+}
+
+template <>
+inline std::shared_ptr<table> make_element<table>
() +{ + return make_table(); +} + +template +std::shared_ptr value::clone() const +{ + return make_value(data_); +} + +inline std::shared_ptr array::clone() const +{ + auto result = make_array(); + result->reserve(values_.size()); + for (const auto& ptr : values_) + result->values_.push_back(ptr->clone()); + return result; +} + +inline std::shared_ptr table_array::clone() const +{ + auto result = make_table_array(); + result->reserve(array_.size()); + for (const auto& ptr : array_) + result->array_.push_back(ptr->clone()->as_table()); + return result; +} + +inline std::shared_ptr table::clone() const +{ + auto result = make_table(); + for (const auto& pr : map_) + result->insert(pr.first, pr.second->clone()); + return result; +} + +/** + * Exception class for all TOML parsing errors. + */ +class parse_exception : public std::runtime_error +{ + public: + parse_exception(const std::string& err) : std::runtime_error{err} + { + } + + parse_exception(const std::string& err, std::size_t line_number) + : std::runtime_error{err + " at line " + std::to_string(line_number)} + { + } +}; + +inline bool is_number(char c) +{ + return c >= '0' && c <= '9'; +} + +/** + * Helper object for consuming expected characters. + */ +template +class consumer +{ + public: + consumer(std::string::iterator& it, const std::string::iterator& end, + OnError&& on_error) + : it_(it), end_(end), on_error_(std::forward(on_error)) + { + // nothing + } + + void operator()(char c) + { + if (it_ == end_ || *it_ != c) + on_error_(); + ++it_; + } + + template + void operator()(const char (&str)[N]) + { + std::for_each(std::begin(str), std::end(str) - 1, + [&](char c) { (*this)(c); }); + } + + int eat_digits(int len) + { + int val = 0; + for (int i = 0; i < len; ++i) + { + if (!is_number(*it_) || it_ == end_) + on_error_(); + val = 10 * val + (*it_++ - '0'); + } + return val; + } + + void error() + { + on_error_(); + } + + private: + std::string::iterator& it_; + const std::string::iterator& end_; + OnError on_error_; +}; + +template +consumer make_consumer(std::string::iterator& it, + const std::string::iterator& end, + OnError&& on_error) +{ + return consumer(it, end, std::forward(on_error)); +} + +// replacement for std::getline to handle incorrectly line-ended files +// https://stackoverflow.com/questions/6089231/getting-std-ifstream-to-handle-lf-cr-and-crlf +namespace detail +{ +inline std::istream& getline(std::istream& input, std::string& line) +{ + line.clear(); + + std::istream::sentry sentry{input, true}; + auto sb = input.rdbuf(); + + while (true) + { + auto c = sb->sbumpc(); + if (c == '\r') + { + if (sb->sgetc() == '\n') + c = sb->sbumpc(); + } + + if (c == '\n') + return input; + + if (c == std::istream::traits_type::eof()) + { + if (line.empty()) + input.setstate(std::ios::eofbit); + return input; + } + + line.push_back(static_cast(c)); + } +} +} + +/** + * The parser class. + */ +class parser +{ + public: + /** + * Parsers are constructed from streams. + */ + parser(std::istream& stream) : input_(stream) + { + // nothing + } + + parser& operator=(const parser& parser) = delete; + + /** + * Parses the stream this parser was created on until EOF. + * @throw parse_exception if there are errors in parsing + */ + std::shared_ptr
<table> parse()
+    {
+        std::shared_ptr<table>
root = make_table(); + + table* curr_table = root.get(); + + while (detail::getline(input_, line_)) + { + line_number_++; + auto it = line_.begin(); + auto end = line_.end(); + consume_whitespace(it, end); + if (it == end || *it == '#') + continue; + if (*it == '[') + { + curr_table = root.get(); + parse_table(it, end, curr_table); + } + else + { + parse_key_value(it, end, curr_table); + consume_whitespace(it, end); + eol_or_comment(it, end); + } + } + return root; + } + + private: +#if defined _MSC_VER + __declspec(noreturn) +#elif defined __GNUC__ + __attribute__((noreturn)) +#endif + void throw_parse_exception(const std::string& err) + { + throw parse_exception{err, line_number_}; + } + + void parse_table(std::string::iterator& it, + const std::string::iterator& end, table*& curr_table) + { + // remove the beginning keytable marker + ++it; + if (it == end) + throw_parse_exception("Unexpected end of table"); + if (*it == '[') + parse_table_array(it, end, curr_table); + else + parse_single_table(it, end, curr_table); + } + + void parse_single_table(std::string::iterator& it, + const std::string::iterator& end, + table*& curr_table) + { + if (it == end || *it == ']') + throw_parse_exception("Table name cannot be empty"); + + std::string full_table_name; + bool inserted = false; + while (it != end && *it != ']') + { + auto part = parse_key(it, end, + [](char c) { return c == '.' || c == ']'; }); + + if (part.empty()) + throw_parse_exception("Empty component of table name"); + + if (!full_table_name.empty()) + full_table_name += "."; + full_table_name += part; + + if (curr_table->contains(part)) + { + auto b = curr_table->get(part); + if (b->is_table()) + curr_table = static_cast(b.get()); + else if (b->is_table_array()) + curr_table = std::static_pointer_cast(b) + ->get() + .back() + .get(); + else + throw_parse_exception("Key " + full_table_name + + "already exists as a value"); + } + else + { + inserted = true; + curr_table->insert(part, make_table()); + curr_table = static_cast(curr_table->get(part).get()); + } + consume_whitespace(it, end); + if (it != end && *it == '.') + ++it; + consume_whitespace(it, end); + } + + if (it == end) + throw_parse_exception( + "Unterminated table declaration; did you forget a ']'?"); + + // table already existed + if (!inserted) + { + auto is_value + = [](const std::pair&>& p) { + return p.second->is_value(); + }; + + // if there are any values, we can't add values to this table + // since it has already been defined. If there aren't any + // values, then it was implicitly created by something like + // [a.b] + if (curr_table->empty() || std::any_of(curr_table->begin(), + curr_table->end(), is_value)) + { + throw_parse_exception("Redefinition of table " + + full_table_name); + } + } + + ++it; + consume_whitespace(it, end); + eol_or_comment(it, end); + } + + void parse_table_array(std::string::iterator& it, + const std::string::iterator& end, table*& curr_table) + { + ++it; + if (it == end || *it == ']') + throw_parse_exception("Table array name cannot be empty"); + + std::string full_ta_name; + while (it != end && *it != ']') + { + auto part = parse_key(it, end, + [](char c) { return c == '.' 
|| c == ']'; }); + + if (part.empty()) + throw_parse_exception("Empty component of table array name"); + + if (!full_ta_name.empty()) + full_ta_name += "."; + full_ta_name += part; + + consume_whitespace(it, end); + if (it != end && *it == '.') + ++it; + consume_whitespace(it, end); + + if (curr_table->contains(part)) + { + auto b = curr_table->get(part); + + // if this is the end of the table array name, add an + // element to the table array that we just looked up + if (it != end && *it == ']') + { + if (!b->is_table_array()) + throw_parse_exception("Key " + full_ta_name + + " is not a table array"); + auto v = b->as_table_array(); + v->get().push_back(make_table()); + curr_table = v->get().back().get(); + } + // otherwise, just keep traversing down the key name + else + { + if (b->is_table()) + curr_table = static_cast(b.get()); + else if (b->is_table_array()) + curr_table = std::static_pointer_cast(b) + ->get() + .back() + .get(); + else + throw_parse_exception("Key " + full_ta_name + + " already exists as a value"); + } + } + else + { + // if this is the end of the table array name, add a new + // table array and a new table inside that array for us to + // add keys to next + if (it != end && *it == ']') + { + curr_table->insert(part, make_table_array()); + auto arr = std::static_pointer_cast( + curr_table->get(part)); + arr->get().push_back(make_table()); + curr_table = arr->get().back().get(); + } + // otherwise, create the implicitly defined table and move + // down to it + else + { + curr_table->insert(part, make_table()); + curr_table + = static_cast(curr_table->get(part).get()); + } + } + } + + // consume the last "]]" + if (it == end) + throw_parse_exception("Unterminated table array name"); + ++it; + if (it == end) + throw_parse_exception("Unterminated table array name"); + ++it; + + consume_whitespace(it, end); + eol_or_comment(it, end); + } + + void parse_key_value(std::string::iterator& it, std::string::iterator& end, + table* curr_table) + { + auto key = parse_key(it, end, [](char c) { return c == '='; }); + if (curr_table->contains(key)) + throw_parse_exception("Key " + key + " already present"); + if (it == end || *it != '=') + throw_parse_exception("Value must follow after a '='"); + ++it; + consume_whitespace(it, end); + curr_table->insert(key, parse_value(it, end)); + consume_whitespace(it, end); + } + + template + std::string parse_key(std::string::iterator& it, + const std::string::iterator& end, Function&& fun) + { + consume_whitespace(it, end); + if (*it == '"') + { + return parse_quoted_key(it, end); + } + else + { + auto bke = std::find_if(it, end, std::forward(fun)); + return parse_bare_key(it, bke); + } + } + + std::string parse_bare_key(std::string::iterator& it, + const std::string::iterator& end) + { + if (it == end) + { + throw_parse_exception("Bare key missing name"); + } + + auto key_end = end; + --key_end; + consume_backwards_whitespace(key_end, it); + ++key_end; + std::string key{it, key_end}; + + if (std::find(it, key_end, '#') != key_end) + { + throw_parse_exception("Bare key " + key + " cannot contain #"); + } + + if (std::find_if(it, key_end, + [](char c) { return c == ' ' || c == '\t'; }) + != key_end) + { + throw_parse_exception("Bare key " + key + + " cannot contain whitespace"); + } + + if (std::find_if(it, key_end, + [](char c) { return c == '[' || c == ']'; }) + != key_end) + { + throw_parse_exception("Bare key " + key + + " cannot contain '[' or ']'"); + } + + it = end; + return key; + } + + std::string 
parse_quoted_key(std::string::iterator& it, + const std::string::iterator& end) + { + return string_literal(it, end, '"'); + } + + enum class parse_type + { + STRING = 1, + LOCAL_TIME, + LOCAL_DATE, + LOCAL_DATETIME, + OFFSET_DATETIME, + INT, + FLOAT, + BOOL, + ARRAY, + INLINE_TABLE + }; + + std::shared_ptr parse_value(std::string::iterator& it, + std::string::iterator& end) + { + parse_type type = determine_value_type(it, end); + switch (type) + { + case parse_type::STRING: + return parse_string(it, end); + case parse_type::LOCAL_TIME: + return parse_time(it, end); + case parse_type::LOCAL_DATE: + case parse_type::LOCAL_DATETIME: + case parse_type::OFFSET_DATETIME: + return parse_date(it, end); + case parse_type::INT: + case parse_type::FLOAT: + return parse_number(it, end); + case parse_type::BOOL: + return parse_bool(it, end); + case parse_type::ARRAY: + return parse_array(it, end); + case parse_type::INLINE_TABLE: + return parse_inline_table(it, end); + default: + throw_parse_exception("Failed to parse value"); + } + } + + parse_type determine_value_type(const std::string::iterator& it, + const std::string::iterator& end) + { + if(it == end) + { + throw_parse_exception("Failed to parse value type"); + } + if (*it == '"' || *it == '\'') + { + return parse_type::STRING; + } + else if (is_time(it, end)) + { + return parse_type::LOCAL_TIME; + } + else if (auto dtype = date_type(it, end)) + { + return *dtype; + } + else if (is_number(*it) || *it == '-' || *it == '+') + { + return determine_number_type(it, end); + } + else if (*it == 't' || *it == 'f') + { + return parse_type::BOOL; + } + else if (*it == '[') + { + return parse_type::ARRAY; + } + else if (*it == '{') + { + return parse_type::INLINE_TABLE; + } + throw_parse_exception("Failed to parse value type"); + } + + parse_type determine_number_type(const std::string::iterator& it, + const std::string::iterator& end) + { + // determine if we are an integer or a float + auto check_it = it; + if (*check_it == '-' || *check_it == '+') + ++check_it; + while (check_it != end && is_number(*check_it)) + ++check_it; + if (check_it != end && *check_it == '.') + { + ++check_it; + while (check_it != end && is_number(*check_it)) + ++check_it; + return parse_type::FLOAT; + } + else + { + return parse_type::INT; + } + } + + std::shared_ptr> parse_string(std::string::iterator& it, + std::string::iterator& end) + { + auto delim = *it; + assert(delim == '"' || delim == '\''); + + // end is non-const here because we have to be able to potentially + // parse multiple lines in a string, not just one + auto check_it = it; + ++check_it; + if (check_it != end && *check_it == delim) + { + ++check_it; + if (check_it != end && *check_it == delim) + { + it = ++check_it; + return parse_multiline_string(it, end, delim); + } + } + return make_value(string_literal(it, end, delim)); + } + + std::shared_ptr> + parse_multiline_string(std::string::iterator& it, + std::string::iterator& end, char delim) + { + std::stringstream ss; + + auto is_ws = [](char c) { return c == ' ' || c == '\t'; }; + + bool consuming = false; + std::shared_ptr> ret; + + auto handle_line + = [&](std::string::iterator& local_it, + std::string::iterator& local_end) { + if (consuming) + { + local_it = std::find_if_not(local_it, local_end, is_ws); + + // whole line is whitespace + if (local_it == local_end) + return; + } + + consuming = false; + + while (local_it != local_end) + { + // handle escaped characters + if (delim == '"' && *local_it == '\\') + { + auto check = local_it; + // check if this 
is an actual escape sequence or a + // whitespace escaping backslash + ++check; + consume_whitespace(check, local_end); + if (check == local_end) + { + consuming = true; + break; + } + + ss << parse_escape_code(local_it, local_end); + continue; + } + + // if we can end the string + if (std::distance(local_it, local_end) >= 3) + { + auto check = local_it; + // check for """ + if (*check++ == delim && *check++ == delim + && *check++ == delim) + { + local_it = check; + ret = make_value(ss.str()); + break; + } + } + + ss << *local_it++; + } + }; + + // handle the remainder of the current line + handle_line(it, end); + if (ret) + return ret; + + // start eating lines + while (detail::getline(input_, line_)) + { + ++line_number_; + + it = line_.begin(); + end = line_.end(); + + handle_line(it, end); + + if (ret) + return ret; + + if (!consuming) + ss << std::endl; + } + + throw_parse_exception("Unterminated multi-line basic string"); + } + + std::string string_literal(std::string::iterator& it, + const std::string::iterator& end, char delim) + { + ++it; + std::string val; + while (it != end) + { + // handle escaped characters + if (delim == '"' && *it == '\\') + { + val += parse_escape_code(it, end); + } + else if (*it == delim) + { + ++it; + consume_whitespace(it, end); + return val; + } + else + { + val += *it++; + } + } + throw_parse_exception("Unterminated string literal"); + } + + std::string parse_escape_code(std::string::iterator& it, + const std::string::iterator& end) + { + ++it; + if (it == end) + throw_parse_exception("Invalid escape sequence"); + char value; + if (*it == 'b') + { + value = '\b'; + } + else if (*it == 't') + { + value = '\t'; + } + else if (*it == 'n') + { + value = '\n'; + } + else if (*it == 'f') + { + value = '\f'; + } + else if (*it == 'r') + { + value = '\r'; + } + else if (*it == '"') + { + value = '"'; + } + else if (*it == '\\') + { + value = '\\'; + } + else if (*it == 'u' || *it == 'U') + { + return parse_unicode(it, end); + } + else + { + throw_parse_exception("Invalid escape sequence"); + } + ++it; + return std::string(1, value); + } + + std::string parse_unicode(std::string::iterator& it, + const std::string::iterator& end) + { + bool large = *it++ == 'U'; + auto codepoint = parse_hex(it, end, large ? 
0x10000000 : 0x1000); + + if ((codepoint > 0xd7ff && codepoint < 0xe000) || codepoint > 0x10ffff) + { + throw_parse_exception( + "Unicode escape sequence is not a Unicode scalar value"); + } + + std::string result; + // See Table 3-6 of the Unicode standard + if (codepoint <= 0x7f) + { + // 1-byte codepoints: 00000000 0xxxxxxx + // repr: 0xxxxxxx + result += static_cast<char>(codepoint & 0x7f); + } + else if (codepoint <= 0x7ff) + { + // 2-byte codepoints: 00000yyy yyxxxxxx + // repr: 110yyyyy 10xxxxxx + // + // 0x1f = 00011111 + // 0xc0 = 11000000 + // + result += static_cast<char>(0xc0 | ((codepoint >> 6) & 0x1f)); + // + // 0x80 = 10000000 + // 0x3f = 00111111 + // + result += static_cast<char>(0x80 | (codepoint & 0x3f)); + } + else if (codepoint <= 0xffff) + { + // 3-byte codepoints: zzzzyyyy yyxxxxxx + // repr: 1110zzzz 10yyyyyy 10xxxxxx + // + // 0xe0 = 11100000 + // 0x0f = 00001111 + // + result += static_cast<char>(0xe0 | ((codepoint >> 12) & 0x0f)); + result += static_cast<char>(0x80 | ((codepoint >> 6) & 0x1f)); + result += static_cast<char>(0x80 | (codepoint & 0x3f)); + } + else + { + // 4-byte codepoints: 000uuuuu zzzzyyyy yyxxxxxx + // repr: 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx + // + // 0xf0 = 11110000 + // 0x07 = 00000111 + // + result += static_cast<char>(0xf0 | ((codepoint >> 18) & 0x07)); + result += static_cast<char>(0x80 | ((codepoint >> 12) & 0x3f)); + result += static_cast<char>(0x80 | ((codepoint >> 6) & 0x3f)); + result += static_cast<char>(0x80 | (codepoint & 0x3f)); + } + return result; + } + + uint32_t parse_hex(std::string::iterator& it, + const std::string::iterator& end, uint32_t place) + { + uint32_t value = 0; + while (place > 0) + { + if (it == end) + throw_parse_exception("Unexpected end of unicode sequence"); + + if (!is_hex(*it)) + throw_parse_exception("Invalid unicode escape sequence"); + + value += place * hex_to_digit(*it++); + place /= 16; + } + return value; + } + + bool is_hex(char c) + { + return is_number(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'); + } + + uint32_t hex_to_digit(char c) + { + if (is_number(c)) + return static_cast<uint32_t>(c - '0'); + return 10 + static_cast<uint32_t>( + c - ((c >= 'a' && c <= 'f') ? 'a' : 'A')); + } + + std::shared_ptr<base> parse_number(std::string::iterator& it, + const std::string::iterator& end) + { + auto check_it = it; + auto check_end = find_end_of_number(it, end); + + auto eat_sign = [&]() { + if (check_it != end && (*check_it == '-' || *check_it == '+')) + ++check_it; + }; + + eat_sign(); + + auto eat_numbers = [&]() { + auto beg = check_it; + while (check_it != end && is_number(*check_it)) + { + ++check_it; + if (check_it != end && *check_it == '_') + { + ++check_it; + if (check_it == end || !is_number(*check_it)) + throw_parse_exception("Malformed number"); + } + } + + if (check_it == beg) + throw_parse_exception("Malformed number"); + }; + + auto check_no_leading_zero = [&]() { + if (check_it != end && *check_it == '0' && check_it + 1 != check_end + && check_it[1] != '.') + { + throw_parse_exception("Numbers may not have leading zeros"); + } + }; + + check_no_leading_zero(); + eat_numbers(); + + if (check_it != end + && (*check_it == '.'
|| *check_it == 'e' || *check_it == 'E')) + { + bool is_exp = *check_it == 'e' || *check_it == 'E'; + + ++check_it; + if (check_it == end) + throw_parse_exception("Floats must have trailing digits"); + + auto eat_exp = [&]() { + eat_sign(); + check_no_leading_zero(); + eat_numbers(); + }; + + if (is_exp) + eat_exp(); + else + eat_numbers(); + + if (!is_exp && check_it != end + && (*check_it == 'e' || *check_it == 'E')) + { + ++check_it; + eat_exp(); + } + + return parse_float(it, check_it); + } + else + { + return parse_int(it, check_it); + } + } + + std::shared_ptr<value<int64_t>> parse_int(std::string::iterator& it, + const std::string::iterator& end) + { + std::string v{it, end}; + v.erase(std::remove(v.begin(), v.end(), '_'), v.end()); + it = end; + try + { + return make_value<int64_t>(std::stoll(v)); + } + catch (const std::invalid_argument& ex) + { + throw_parse_exception("Malformed number (invalid argument: " + + std::string{ex.what()} + ")"); + } + catch (const std::out_of_range& ex) + { + throw_parse_exception("Malformed number (out of range: " + + std::string{ex.what()} + ")"); + } + } + + std::shared_ptr<value<double>> parse_float(std::string::iterator& it, + const std::string::iterator& end) + { + std::string v{it, end}; + v.erase(std::remove(v.begin(), v.end(), '_'), v.end()); + it = end; + char decimal_point = std::localeconv()->decimal_point[0]; + std::replace(v.begin(), v.end(), '.', decimal_point); + try + { + return make_value<double>(std::stod(v)); + } + catch (const std::invalid_argument& ex) + { + throw_parse_exception("Malformed number (invalid argument: " + + std::string{ex.what()} + ")"); + } + catch (const std::out_of_range& ex) + { + throw_parse_exception("Malformed number (out of range: " + + std::string{ex.what()} + ")"); + } + } + + std::shared_ptr<value<bool>> parse_bool(std::string::iterator& it, + const std::string::iterator& end) + { + auto eat = make_consumer(it, end, [this]() { + throw_parse_exception("Attempted to parse invalid boolean value"); + }); + + if (*it == 't') + { + eat("true"); + return make_value<bool>(true); + } + else if (*it == 'f') + { + eat("false"); + return make_value<bool>(false); + } + + eat.error(); + return nullptr; + } + + std::string::iterator find_end_of_number(std::string::iterator it, + std::string::iterator end) + { + return std::find_if(it, end, [](char c) { + return !is_number(c) && c != '_' && c != '.'
&& c != 'e' && c != 'E' + && c != '-' && c != '+'; + }); + } + + std::string::iterator find_end_of_date(std::string::iterator it, + std::string::iterator end) + { + return std::find_if(it, end, [](char c) { + return !is_number(c) && c != 'T' && c != 'Z' && c != ':' && c != '-' + && c != '+' && c != '.'; + }); + } + + std::string::iterator find_end_of_time(std::string::iterator it, + std::string::iterator end) + { + return std::find_if(it, end, [](char c) { + return !is_number(c) && c != ':' && c != '.'; + }); + } + + local_time read_time(std::string::iterator& it, + const std::string::iterator& end) + { + auto time_end = find_end_of_time(it, end); + + auto eat = make_consumer( + it, time_end, [&]() { throw_parse_exception("Malformed time"); }); + + local_time ltime; + + ltime.hour = eat.eat_digits(2); + eat(':'); + ltime.minute = eat.eat_digits(2); + eat(':'); + ltime.second = eat.eat_digits(2); + + int power = 100000; + if (it != time_end && *it == '.') + { + ++it; + while (it != time_end && is_number(*it)) + { + ltime.microsecond += power * (*it++ - '0'); + power /= 10; + } + } + + if (it != time_end) + throw_parse_exception("Malformed time"); + + return ltime; + } + + std::shared_ptr> + parse_time(std::string::iterator& it, const std::string::iterator& end) + { + return make_value(read_time(it, end)); + } + + std::shared_ptr parse_date(std::string::iterator& it, + const std::string::iterator& end) + { + auto date_end = find_end_of_date(it, end); + + auto eat = make_consumer( + it, date_end, [&]() { throw_parse_exception("Malformed date"); }); + + local_date ldate; + ldate.year = eat.eat_digits(4); + eat('-'); + ldate.month = eat.eat_digits(2); + eat('-'); + ldate.day = eat.eat_digits(2); + + if (it == date_end) + return make_value(ldate); + + eat('T'); + + local_datetime ldt; + static_cast(ldt) = ldate; + static_cast(ldt) = read_time(it, date_end); + + if (it == date_end) + return make_value(ldt); + + offset_datetime dt; + static_cast(dt) = ldt; + + int hoff = 0; + int moff = 0; + if (*it == '+' || *it == '-') + { + auto plus = *it == '+'; + ++it; + + hoff = eat.eat_digits(2); + dt.hour_offset = (plus) ? hoff : -hoff; + eat(':'); + moff = eat.eat_digits(2); + dt.minute_offset = (plus) ? moff : -moff; + } + else if (*it == 'Z') + { + ++it; + } + + if (it != date_end) + throw_parse_exception("Malformed date"); + + return make_value(dt); + } + + std::shared_ptr parse_array(std::string::iterator& it, + std::string::iterator& end) + { + // this gets ugly because of the "homogeneity" restriction: + // arrays can either be of only one type, or contain arrays + // (each of those arrays could be of different types, though) + // + // because of the latter portion, we don't really have a choice + // but to represent them as arrays of base values... + ++it; + + // ugh---have to read the first value to determine array type... 
+ skip_whitespace_and_comments(it, end); + + // edge case---empty array + if (*it == ']') + { + ++it; + return make_array(); + } + + auto val_end = std::find_if( + it, end, [](char c) { return c == ',' || c == ']' || c == '#'; }); + parse_type type = determine_value_type(it, val_end); + switch (type) + { + case parse_type::STRING: + return parse_value_array(it, end); + case parse_type::LOCAL_TIME: + return parse_value_array(it, end); + case parse_type::LOCAL_DATE: + return parse_value_array(it, end); + case parse_type::LOCAL_DATETIME: + return parse_value_array(it, end); + case parse_type::OFFSET_DATETIME: + return parse_value_array(it, end); + case parse_type::INT: + return parse_value_array(it, end); + case parse_type::FLOAT: + return parse_value_array(it, end); + case parse_type::BOOL: + return parse_value_array(it, end); + case parse_type::ARRAY: + return parse_object_array(&parser::parse_array, '[', it, + end); + case parse_type::INLINE_TABLE: + return parse_object_array( + &parser::parse_inline_table, '{', it, end); + default: + throw_parse_exception("Unable to parse array"); + } + } + + template + std::shared_ptr parse_value_array(std::string::iterator& it, + std::string::iterator& end) + { + auto arr = make_array(); + while (it != end && *it != ']') + { + auto value = parse_value(it, end); + if (auto v = value->as()) + arr->get().push_back(value); + else + throw_parse_exception("Arrays must be homogeneous"); + skip_whitespace_and_comments(it, end); + if (*it != ',') + break; + ++it; + skip_whitespace_and_comments(it, end); + } + if (it != end) + ++it; + return arr; + } + + template + std::shared_ptr parse_object_array(Function&& fun, char delim, + std::string::iterator& it, + std::string::iterator& end) + { + auto arr = make_element(); + + while (it != end && *it != ']') + { + if (*it != delim) + throw_parse_exception("Unexpected character in array"); + + arr->get().push_back(((*this).*fun)(it, end)); + skip_whitespace_and_comments(it, end); + + if (*it != ',') + break; + + ++it; + skip_whitespace_and_comments(it, end); + } + + if (it == end || *it != ']') + throw_parse_exception("Unterminated array"); + + ++it; + return arr; + } + + std::shared_ptr
parse_inline_table(std::string::iterator& it, + std::string::iterator& end) + { + auto tbl = make_table(); + do + { + ++it; + if (it == end) + throw_parse_exception("Unterminated inline table"); + + consume_whitespace(it, end); + parse_key_value(it, end, tbl.get()); + consume_whitespace(it, end); + } while (*it == ','); + + if (it == end || *it != '}') + throw_parse_exception("Unterminated inline table"); + + ++it; + consume_whitespace(it, end); + + return tbl; + } + + void skip_whitespace_and_comments(std::string::iterator& start, + std::string::iterator& end) + { + consume_whitespace(start, end); + while (start == end || *start == '#') + { + if (!detail::getline(input_, line_)) + throw_parse_exception("Unclosed array"); + line_number_++; + start = line_.begin(); + end = line_.end(); + consume_whitespace(start, end); + } + } + + void consume_whitespace(std::string::iterator& it, + const std::string::iterator& end) + { + while (it != end && (*it == ' ' || *it == '\t')) + ++it; + } + + void consume_backwards_whitespace(std::string::iterator& back, + const std::string::iterator& front) + { + while (back != front && (*back == ' ' || *back == '\t')) + --back; + } + + void eol_or_comment(const std::string::iterator& it, + const std::string::iterator& end) + { + if (it != end && *it != '#') + throw_parse_exception("Unidentified trailing character '" + + std::string{*it} + + "'---did you forget a '#'?"); + } + + bool is_time(const std::string::iterator& it, + const std::string::iterator& end) + { + auto time_end = find_end_of_time(it, end); + auto len = std::distance(it, time_end); + + if (len < 8) + return false; + + if (it[2] != ':' || it[5] != ':') + return false; + + if (len > 8) + return it[8] == '.' && len > 9; + + return true; + } + + option date_type(const std::string::iterator& it, + const std::string::iterator& end) + { + auto date_end = find_end_of_date(it, end); + auto len = std::distance(it, date_end); + + if (len < 10) + return {}; + + if (it[4] != '-' || it[7] != '-') + return {}; + + if (len >= 19 && it[10] == 'T' && is_time(it + 11, date_end)) + { + // datetime type + auto time_end = find_end_of_time(it + 11, date_end); + if (time_end == date_end) + return {parse_type::LOCAL_DATETIME}; + else + return {parse_type::OFFSET_DATETIME}; + } + else if (len == 10) + { + // just a regular date + return {parse_type::LOCAL_DATE}; + } + + return {}; + } + + std::istream& input_; + std::string line_; + std::size_t line_number_ = 0; +}; + +/** + * Utility function to parse a file as a TOML file. Returns the root table. + * Throws a parse_exception if the file cannot be opened. + */ +inline std::shared_ptr
parse_file(const std::string& filename) +{ +#if defined(BOOST_NOWIDE_FSTREAM_INCLUDED_HPP) + boost::nowide::ifstream file{filename.c_str()}; +#elif defined(NOWIDE_FSTREAM_INCLUDED_HPP) + nowide::ifstream file{filename.c_str()}; +#else + std::ifstream file{filename}; +#endif + if (!file.is_open()) + throw parse_exception{filename + " could not be opened for parsing"}; + parser p{file}; + return p.parse(); +} + +template +struct value_accept; + +template <> +struct value_accept<> +{ + template + static void accept(const base&, Visitor&&, Args&&...) + { + // nothing + } +}; + +template +struct value_accept +{ + template + static void accept(const base& b, Visitor&& visitor, Args&&... args) + { + if (auto v = b.as()) + { + visitor.visit(*v, std::forward(args)...); + } + else + { + value_accept::accept(b, std::forward(visitor), + std::forward(args)...); + } + } +}; + +/** + * base implementation of accept() that calls visitor.visit() on the concrete + * class. + */ +template +void base::accept(Visitor&& visitor, Args&&... args) const +{ + if (is_value()) + { + using value_acceptor + = value_accept; + value_acceptor::accept(*this, std::forward(visitor), + std::forward(args)...); + } + else if (is_table()) + { + visitor.visit(static_cast(*this), + std::forward(args)...); + } + else if (is_array()) + { + visitor.visit(static_cast(*this), + std::forward(args)...); + } + else if (is_table_array()) + { + visitor.visit(static_cast(*this), + std::forward(args)...); + } +} + +/** + * Writer that can be passed to accept() functions of cpptoml objects and + * will output valid TOML to a stream. + */ +class toml_writer +{ + public: + /** + * Construct a toml_writer that will write to the given stream + */ + toml_writer(std::ostream& s, const std::string& indent_space = "\t") + : stream_(s), indent_(indent_space), has_naked_endline_(false) + { + // nothing + } + + public: + /** + * Output a base value of the TOML tree. + */ + template + void visit(const value& v, bool = false) + { + write(v); + } + + /** + * Output a table element of the TOML tree + */ + void visit(const table& t, bool in_array = false) + { + write_table_header(in_array); + std::vector values; + std::vector tables; + + for (const auto& i : t) + { + if (i.second->is_table() || i.second->is_table_array()) + { + tables.push_back(i.first); + } + else + { + values.push_back(i.first); + } + } + + for (unsigned int i = 0; i < values.size(); ++i) + { + path_.push_back(values[i]); + + if (i > 0) + endline(); + + write_table_item_header(*t.get(values[i])); + t.get(values[i])->accept(*this, false); + path_.pop_back(); + } + + for (unsigned int i = 0; i < tables.size(); ++i) + { + path_.push_back(tables[i]); + + if (values.size() > 0 || i > 0) + endline(); + + write_table_item_header(*t.get(tables[i])); + t.get(tables[i])->accept(*this, false); + path_.pop_back(); + } + + endline(); + } + + /** + * Output an array element of the TOML tree + */ + void visit(const array& a, bool = false) + { + write("["); + + for (unsigned int i = 0; i < a.get().size(); ++i) + { + if (i > 0) + write(", "); + + if (a.get()[i]->is_array()) + { + a.get()[i]->as_array()->accept(*this, true); + } + else + { + a.get()[i]->accept(*this, true); + } + } + + write("]"); + } + + /** + * Output a table_array element of the TOML tree + */ + void visit(const table_array& t, bool = false) + { + for (unsigned int j = 0; j < t.get().size(); ++j) + { + if (j > 0) + endline(); + + t.get()[j]->accept(*this, true); + } + + endline(); + } + + /** + * Escape a string for output. 
+ */ + static std::string escape_string(const std::string& str) + { + std::string res; + for (auto it = str.begin(); it != str.end(); ++it) + { + if (*it == '\b') + { + res += "\\b"; + } + else if (*it == '\t') + { + res += "\\t"; + } + else if (*it == '\n') + { + res += "\\n"; + } + else if (*it == '\f') + { + res += "\\f"; + } + else if (*it == '\r') + { + res += "\\r"; + } + else if (*it == '"') + { + res += "\\\""; + } + else if (*it == '\\') + { + res += "\\\\"; + } + else if ((const uint32_t)*it <= 0x001f) + { + res += "\\u"; + std::stringstream ss; + ss << std::hex << static_cast(*it); + res += ss.str(); + } + else + { + res += *it; + } + } + return res; + } + + protected: + /** + * Write out a string. + */ + void write(const value& v) + { + write("\""); + write(escape_string(v.get())); + write("\""); + } + + /** + * Write out a double. + */ + void write(const value& v) + { + std::ios::fmtflags flags{stream_.flags()}; + + stream_ << std::showpoint; + write(v.get()); + + stream_.flags(flags); + } + + /** + * Write out an integer, local_date, local_time, local_datetime, or + * offset_datetime. + */ + template + typename std::enable_if::value>::type + write(const value& v) + { + write(v.get()); + } + + /** + * Write out a boolean. + */ + void write(const value& v) + { + write((v.get() ? "true" : "false")); + } + + /** + * Write out the header of a table. + */ + void write_table_header(bool in_array = false) + { + if (!path_.empty()) + { + indent(); + + write("["); + + if (in_array) + { + write("["); + } + + for (unsigned int i = 0; i < path_.size(); ++i) + { + if (i > 0) + { + write("."); + } + + if (path_[i].find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789" + "_-") + == std::string::npos) + { + write(path_[i]); + } + else + { + write("\""); + write(escape_string(path_[i])); + write("\""); + } + } + + if (in_array) + { + write("]"); + } + + write("]"); + endline(); + } + } + + /** + * Write out the identifier for an item in a table. + */ + void write_table_item_header(const base& b) + { + if (!b.is_table() && !b.is_table_array()) + { + indent(); + + if (path_.back().find_first_not_of("ABCDEFGHIJKLMNOPQRSTUVWXYZabcde" + "fghijklmnopqrstuvwxyz0123456789" + "_-") + == std::string::npos) + { + write(path_.back()); + } + else + { + write("\""); + write(escape_string(path_.back())); + write("\""); + } + + write(" = "); + } + } + + private: + /** + * Indent the proper number of tabs given the size of + * the path. + */ + void indent() + { + for (std::size_t i = 1; i < path_.size(); ++i) + write(indent_); + } + + /** + * Write a value out to the stream. 
+ */ + template <class T> + void write(const T& v) + { + stream_ << v; + has_naked_endline_ = false; + } + + /** + * Write an endline out to the stream + */ + void endline() + { + if (!has_naked_endline_) + { + stream_ << "\n"; + has_naked_endline_ = true; + } + } + + private: + std::ostream& stream_; + const std::string indent_; + std::vector<std::string> path_; + bool has_naked_endline_; +}; + +inline std::ostream& operator<<(std::ostream& stream, const base& b) +{ + toml_writer writer{stream}; + b.accept(writer); + return stream; +} + +template <class T> +std::ostream& operator<<(std::ostream& stream, const value<T>& v) +{ + toml_writer writer{stream}; + v.accept(writer); + return stream; +} + +inline std::ostream& operator<<(std::ostream& stream, const table& t) +{ + toml_writer writer{stream}; + t.accept(writer); + return stream; +} + +inline std::ostream& operator<<(std::ostream& stream, const table_array& t) +{ + toml_writer writer{stream}; + t.accept(writer); + return stream; +} + +inline std::ostream& operator<<(std::ostream& stream, const array& a) +{ + toml_writer writer{stream}; + a.accept(writer); + return stream; +} +} +#endif diff --git a/src/libexpr/attr-set.cc b/src/libexpr/attr-set.cc index b284daa3c2f..0785897d251 100644 --- a/src/libexpr/attr-set.cc +++ b/src/libexpr/attr-set.cc @@ -1,5 +1,5 @@ #include "attr-set.hh" -#include "eval.hh" +#include "eval-inline.hh" #include <algorithm> @@ -7,30 +7,18 @@ namespace nix { -/* Note: Various places expect the allocated memory to be zeroed. */ -static void * allocBytes(size_t n) -{ - void * p; -#if HAVE_BOEHMGC - p = GC_malloc(n); -#else - p = calloc(n, 1); -#endif - if (!p) throw std::bad_alloc(); - return p; -} - - /* Allocate a new array of attributes for an attribute set with a specific capacity. The space is implicitly reserved after the Bindings structure. */ -Bindings * EvalState::allocBindings(Bindings::size_t capacity) +Bindings * EvalState::allocBindings(size_t capacity) { - return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings(capacity); + if (capacity > std::numeric_limits<Bindings::size_t>::max()) + throw Error("attribute set of size %d is too big", capacity); + return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings((Bindings::size_t) capacity); } -void EvalState::mkAttrs(Value & v, unsigned int capacity) +void EvalState::mkAttrs(Value & v, size_t capacity) { if (capacity == 0) { v = vEmptySet; diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh index 8cc50e56135..c27116e3b44 100644 --- a/src/libexpr/eval-inline.hh +++ b/src/libexpr/eval-inline.hh @@ -78,4 +78,18 @@ inline void EvalState::forceList(Value & v, const Pos & pos) throwTypeError("value is %1% while a list was expected, at %2%", v, pos); } +/* Note: Various places expect the allocated memory to be zeroed.
*/ +inline void * allocBytes(size_t n) +{ + void * p; +#if HAVE_BOEHMGC + p = GC_MALLOC(n); +#else + p = calloc(n, 1); +#endif + if (!p) throw std::bad_alloc(); + return p; +} + + } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 8548990795a..2a194d0e08c 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -6,10 +6,16 @@ #include "globals.hh" #include "eval-inline.hh" #include "download.hh" +#include "json.hh" #include #include #include +#include +#include +#include +#include + #include #include @@ -18,15 +24,8 @@ #include #include -#define NEW new (UseGC) - -#else - -#define NEW new - #endif - namespace nix { @@ -34,7 +33,7 @@ static char * dupString(const char * s) { char * t; #if HAVE_BOEHMGC - t = GC_strdup(s); + t = GC_STRDUP(s); #else t = strdup(s); #endif @@ -43,20 +42,6 @@ static char * dupString(const char * s) } -/* Note: Various places expect the allocated memory to be zeroed. */ -static void * allocBytes(size_t n) -{ - void * p; -#if HAVE_BOEHMGC - p = GC_malloc(n); -#else - p = calloc(n, 1); -#endif - if (!p) throw std::bad_alloc(); - return p; -} - - static void printValue(std::ostream & str, std::set & active, const Value & v) { checkInterrupt(); @@ -199,8 +184,15 @@ void initGC() #if HAVE_BOEHMGC /* Initialise the Boehm garbage collector. */ + + /* Don't look for interior pointers. This reduces the odds of + misdetection a bit. */ GC_set_all_interior_pointers(0); + /* We don't have any roots in data segments, so don't scan from + there. */ + GC_set_no_dls(1); + GC_INIT(); GC_set_oom_fn(oomHandler); @@ -307,20 +299,32 @@ EvalState::EvalState(const Strings & _searchPath, ref store) assert(gcInitialised); + static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes"); + /* Initialise the Nix expression search path. */ - if (!settings.pureEval) { + if (!evalSettings.pureEval) { Strings paths = parseNixPath(getEnv("NIX_PATH", "")); for (auto & i : _searchPath) addToSearchPath(i); for (auto & i : paths) addToSearchPath(i); } addToSearchPath("nix=" + canonPath(settings.nixDataDir + "/nix/corepkgs", true)); - if (settings.restrictEval || settings.pureEval) { + if (evalSettings.restrictEval || evalSettings.pureEval) { allowedPaths = PathSet(); + for (auto & i : searchPath) { auto r = resolveSearchPathElem(i); if (!r.first) continue; - allowedPaths->insert(r.second); + + auto path = r.second; + + if (store->isInStore(r.second)) { + PathSet closure; + store->computeFSClosure(store->toStorePath(r.second), closure); + for (auto & path : closure) + allowedPaths->insert(path); + } else + allowedPaths->insert(r.second); } } @@ -334,7 +338,6 @@ EvalState::EvalState(const Strings & _searchPath, ref store) EvalState::~EvalState() { - fileEvalCache.clear(); } @@ -342,25 +345,37 @@ Path EvalState::checkSourcePath(const Path & path_) { if (!allowedPaths) return path_; + auto i = resolvedPaths.find(path_); + if (i != resolvedPaths.end()) + return i->second; + bool found = false; + /* First canonicalize the path without symlinks, so we make sure an + * attacker can't append ../../... to a path that would be in allowedPaths + * and thus leak symlink targets. + */ + Path abspath = canonPath(path_); + for (auto & i : *allowedPaths) { - if (isDirOrInDir(path_, i)) { + if (isDirOrInDir(abspath, i)) { found = true; break; } } if (!found) - throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path_); + throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath); /* Resolve symlinks. 
*/ - debug(format("checking access to '%s'") % path_); - Path path = canonPath(path_, true); + debug(format("checking access to '%s'") % abspath); + Path path = canonPath(abspath, true); for (auto & i : *allowedPaths) { - if (isDirOrInDir(path, i)) + if (isDirOrInDir(path, i)) { + resolvedPaths[path_] = path; return path; + } } throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path); @@ -369,13 +384,13 @@ Path EvalState::checkSourcePath(const Path & path_) void EvalState::checkURI(const std::string & uri) { - if (!settings.restrictEval) return; + if (!evalSettings.restrictEval) return; /* 'uri' should be equal to a prefix, or in a subdirectory of a prefix. Thus, the prefix https://github.co does not permit access to https://github.com. Note: this allows 'http://' and 'https://' as prefixes for any http/https URI. */ - for (auto & prefix : settings.allowedUris.get()) + for (auto & prefix : evalSettings.allowedUris.get()) if (uri == prefix || (uri.size() > prefix.size() && prefix.size() > 0 @@ -422,7 +437,7 @@ Value * EvalState::addConstant(const string & name, Value & v) Value * EvalState::addPrimOp(const string & name, - unsigned int arity, PrimOpFun primOp) + size_t arity, PrimOpFun primOp) { if (arity == 0) { Value v; @@ -433,7 +448,7 @@ Value * EvalState::addPrimOp(const string & name, string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; Symbol sym = symbols.create(name2); v->type = tPrimOp; - v->primOp = NEW PrimOp(primOp, arity, sym); + v->primOp = new PrimOp(primOp, arity, sym); staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl; baseEnv.values[baseEnvDispl++] = v; baseEnv.values[0]->attrs->push_back(Attr(sym, v)); @@ -528,7 +543,7 @@ Value & mkString(Value & v, const string & s, const PathSet & context) { mkString(v, s.c_str()); if (!context.empty()) { - unsigned int n = 0; + size_t n = 0; v.string.context = (const char * *) allocBytes((context.size() + 1) * sizeof(char *)); for (auto & i : context) @@ -547,17 +562,17 @@ void mkPath(Value & v, const char * s) inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) { - for (unsigned int l = var.level; l; --l, env = env->up) ; + for (size_t l = var.level; l; --l, env = env->up) ; if (!var.fromWith) return env->values[var.displ]; while (1) { - if (!env->haveWithAttrs) { + if (env->type == Env::HasWithExpr) { if (noEval) return 0; Value * v = allocValue(); evalAttrs(*env->up, (Expr *) env->values[0], *v); env->values[0] = v; - env->haveWithAttrs = true; + env->type = Env::HasWithAttrs; } Bindings::iterator j = env->values[0]->attrs->find(var.name); if (j != env->values[0]->attrs->end()) { @@ -566,26 +581,37 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval) } if (!env->prevWith) throwUndefinedVarError("undefined variable '%1%' at %2%", var.name, var.pos); - for (unsigned int l = env->prevWith; l; --l, env = env->up) ; + for (size_t l = env->prevWith; l; --l, env = env->up) ; } } +std::atomic nrValuesFreed{0}; + +void finalizeValue(void * obj, void * data) +{ + nrValuesFreed++; +} + Value * EvalState::allocValue() { nrValues++; - return (Value *) allocBytes(sizeof(Value)); + auto v = (Value *) allocBytes(sizeof(Value)); + //GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr); + return v; } -Env & EvalState::allocEnv(unsigned int size) +Env & EvalState::allocEnv(size_t size) { - assert(size <= std::numeric_limits::max()); + if (size > std::numeric_limits::max()) + throw Error("environment size %d is too 
big", size); nrEnvs++; nrValuesInEnvs += size; Env * env = (Env *) allocBytes(sizeof(Env) + size * sizeof(Value *)); - env->size = size; + env->size = (decltype(Env::size)) size; + env->type = Env::Plain; /* We assume that env->values has been cleared by the allocator; maybeThunk() and lookupVar fromWith expect this. */ @@ -593,7 +619,7 @@ Env & EvalState::allocEnv(unsigned int size) } -void EvalState::mkList(Value & v, unsigned int size) +void EvalState::mkList(Value & v, size_t size) { clearValue(v); if (size == 1) @@ -705,7 +731,17 @@ void EvalState::evalFile(const Path & path_, Value & v) } printTalkative("evaluating file '%1%'", path2); - Expr * e = parseExprFromFile(checkSourcePath(path2)); + Expr * e = nullptr; + + auto j = fileParseCache.find(path2); + if (j != fileParseCache.end()) + e = j->second; + + if (!e) + e = parseExprFromFile(checkSourcePath(path2)); + + fileParseCache[path2] = e; + try { eval(e, v); } catch (Error & e) { @@ -721,6 +757,7 @@ void EvalState::evalFile(const Path & path_, Value & v) void EvalState::resetFileCache() { fileEvalCache.clear(); + fileParseCache.clear(); } @@ -805,7 +842,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v) /* The recursive attributes are evaluated in the new environment, while the inherited attributes are evaluated in the original environment. */ - unsigned int displ = 0; + size_t displ = 0; for (auto & i : attrs) { Value * vAttr; if (hasOverrides && !i.second.inherited) { @@ -879,7 +916,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) /* The recursive attributes are evaluated in the new environment, while the inherited attributes are evaluated in the original environment. */ - unsigned int displ = 0; + size_t displ = 0; for (auto & i : attrs->attrs) env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2); @@ -890,7 +927,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v) void ExprList::eval(EvalState & state, Env & env, Value & v) { state.mkList(v, elems.size()); - for (unsigned int n = 0; n < elems.size(); ++n) + for (size_t n = 0; n < elems.size(); ++n) v.listElems()[n] = elems[n]->maybeThunk(state, env); } @@ -1012,22 +1049,22 @@ void ExprApp::eval(EvalState & state, Env & env, Value & v) void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos) { /* Figure out the number of arguments still needed. */ - unsigned int argsDone = 0; + size_t argsDone = 0; Value * primOp = &fun; while (primOp->type == tPrimOpApp) { argsDone++; primOp = primOp->primOpApp.left; } assert(primOp->type == tPrimOp); - unsigned int arity = primOp->primOp->arity; - unsigned int argsLeft = arity - argsDone; + auto arity = primOp->primOp->arity; + auto argsLeft = arity - argsDone; if (argsLeft == 1) { /* We have all the arguments, so call the primop. */ /* Put all the arguments in an array. */ Value * vArgs[arity]; - unsigned int n = arity - 1; + auto n = arity - 1; vArgs[n--] = &arg; for (Value * arg = &fun; arg->type == tPrimOpApp; arg = arg->primOpApp.left) vArgs[n--] = arg->primOpApp.right; @@ -1048,6 +1085,8 @@ void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos) void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos) { + forceValue(fun, pos); + if (fun.type == tPrimOp || fun.type == tPrimOpApp) { callPrimOp(fun, arg, v, pos); return; @@ -1063,10 +1102,8 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po auto & fun2 = *allocValue(); fun2 = fun; /* !!! 
Should we use the attr pos here? */ - forceValue(*found->value, pos); Value v2; callFunction(*found->value, fun2, v2, pos); - forceValue(v2, pos); return callFunction(v2, arg, v, pos); } } @@ -1076,13 +1113,13 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po ExprLambda & lambda(*fun.lambda.fun); - unsigned int size = + auto size = (lambda.arg.empty() ? 0 : 1) + (lambda.matchAttrs ? lambda.formals->formals.size() : 0); Env & env2(allocEnv(size)); env2.up = fun.lambda.env; - unsigned int displ = 0; + size_t displ = 0; if (!lambda.matchAttrs) env2.values[displ++] = &arg; @@ -1096,7 +1133,7 @@ void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & po /* For each formal argument, get the actual argument. If there is no matching actual argument but the formal argument has a default, use the default. */ - unsigned int attrsUsed = 0; + size_t attrsUsed = 0; for (auto & i : lambda.formals->formals) { Bindings::iterator j = arg.attrs->find(i.name); if (j == arg.attrs->end()) { @@ -1153,7 +1190,6 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res) if (fun.type == tAttrs) { auto found = fun.attrs->find(sFunctor); if (found != fun.attrs->end()) { - forceValue(*found->value); Value * v = allocValue(); callFunction(*found->value, fun, *v, noPos); forceValue(*v); @@ -1188,7 +1224,7 @@ void ExprWith::eval(EvalState & state, Env & env, Value & v) Env & env2(state.allocEnv(1)); env2.up = &env; env2.prevWith = prevWith; - env2.haveWithAttrs = false; + env2.type = Env::HasWithExpr; env2.values[0] = (Value *) attrs; body->eval(state, env2, v); @@ -1294,15 +1330,15 @@ void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) } -void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos) +void EvalState::concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos) { nrListConcats++; Value * nonEmpty = 0; - unsigned int len = 0; - for (unsigned int n = 0; n < nrLists; ++n) { + size_t len = 0; + for (size_t n = 0; n < nrLists; ++n) { forceList(*lists[n], pos); - unsigned int l = lists[n]->listSize(); + auto l = lists[n]->listSize(); len += l; if (l) nonEmpty = lists[n]; } @@ -1314,8 +1350,8 @@ void EvalState::concatLists(Value & v, unsigned int nrLists, Value * * lists, co mkList(v, len); auto out = v.listElems(); - for (unsigned int n = 0, pos = 0; n < nrLists; ++n) { - unsigned int l = lists[n]->listSize(); + for (size_t n = 0, pos = 0; n < nrLists; ++n) { + auto l = lists[n]->listSize(); if (l) memcpy(out + pos, lists[n]->listElems(), l * sizeof(Value *)); pos += l; @@ -1410,7 +1446,7 @@ void EvalState::forceValueDeep(Value & v) } else if (v.isList()) { - for (unsigned int n = 0; n < v.listSize(); ++n) + for (size_t n = 0; n < v.listSize(); ++n) recurse(*v.listElems()[n]); } }; @@ -1537,7 +1573,6 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, if (v.type == tAttrs) { auto i = v.attrs->find(sToString); if (i != v.attrs->end()) { - forceValue(*i->value, pos); Value v1; callFunction(*i->value, v, v1, pos); return coerceToString(pos, v1, context, coerceMore, copyToStore); @@ -1562,7 +1597,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context, if (v.isList()) { string result; - for (unsigned int n = 0; n < v.listSize(); ++n) { + for (size_t n = 0; n < v.listSize(); ++n) { result += coerceToString(pos, *v.listElems()[n], context, coerceMore, copyToStore); if (n < v.listSize() - 1 @@ -1649,7 +1684,7 @@ bool 
EvalState::eqValues(Value & v1, Value & v2) case tList2: case tListN: if (v1.listSize() != v2.listSize()) return false; - for (unsigned int n = 0; n < v1.listSize(); ++n) + for (size_t n = 0; n < v1.listSize(); ++n) if (!eqValues(*v1.listElems()[n], *v2.listElems()[n])) return false; return true; @@ -1691,12 +1726,9 @@ bool EvalState::eqValues(Value & v1, Value & v2) } } - void EvalState::printStats() { bool showStats = getEnv("NIX_SHOW_STATS", "0") != "0"; - Verbosity v = showStats ? lvlInfo : lvlDebug; - printMsg(v, "evaluation statistics:"); struct rusage buf; getrusage(RUSAGE_SELF, &buf); @@ -1707,62 +1739,101 @@ void EvalState::printStats() uint64_t bValues = nrValues * sizeof(Value); uint64_t bAttrsets = nrAttrsets * sizeof(Bindings) + nrAttrsInAttrsets * sizeof(Attr); - printMsg(v, format(" time elapsed: %1%") % cpuTime); - printMsg(v, format(" size of a value: %1%") % sizeof(Value)); - printMsg(v, format(" size of an attr: %1%") % sizeof(Attr)); - printMsg(v, format(" environments allocated count: %1%") % nrEnvs); - printMsg(v, format(" environments allocated bytes: %1%") % bEnvs); - printMsg(v, format(" list elements count: %1%") % nrListElems); - printMsg(v, format(" list elements bytes: %1%") % bLists); - printMsg(v, format(" list concatenations: %1%") % nrListConcats); - printMsg(v, format(" values allocated count: %1%") % nrValues); - printMsg(v, format(" values allocated bytes: %1%") % bValues); - printMsg(v, format(" sets allocated: %1% (%2% bytes)") % nrAttrsets % bAttrsets); - printMsg(v, format(" right-biased unions: %1%") % nrOpUpdates); - printMsg(v, format(" values copied in right-biased unions: %1%") % nrOpUpdateValuesCopied); - printMsg(v, format(" symbols in symbol table: %1%") % symbols.size()); - printMsg(v, format(" size of symbol table: %1%") % symbols.totalSize()); - printMsg(v, format(" number of thunks: %1%") % nrThunks); - printMsg(v, format(" number of thunks avoided: %1%") % nrAvoided); - printMsg(v, format(" number of attr lookups: %1%") % nrLookups); - printMsg(v, format(" number of primop calls: %1%") % nrPrimOpCalls); - printMsg(v, format(" number of function calls: %1%") % nrFunctionCalls); - printMsg(v, format(" total allocations: %1% bytes") % (bEnvs + bLists + bValues + bAttrsets)); - #if HAVE_BOEHMGC GC_word heapSize, totalBytes; GC_get_heap_usage_safe(&heapSize, 0, 0, 0, &totalBytes); - printMsg(v, format(" current Boehm heap size: %1% bytes") % heapSize); - printMsg(v, format(" total Boehm heap allocations: %1% bytes") % totalBytes); #endif - - if (countCalls) { - v = lvlInfo; - - printMsg(v, format("calls to %1% primops:") % primOpCalls.size()); - typedef std::multimap PrimOpCalls_; - PrimOpCalls_ primOpCalls_; - for (auto & i : primOpCalls) - primOpCalls_.insert(std::pair(i.second, i.first)); - for (auto i = primOpCalls_.rbegin(); i != primOpCalls_.rend(); ++i) - printMsg(v, format("%1$10d %2%") % i->first % i->second); - - printMsg(v, format("calls to %1% functions:") % functionCalls.size()); - typedef std::multimap FunctionCalls_; - FunctionCalls_ functionCalls_; - for (auto & i : functionCalls) - functionCalls_.insert(std::pair(i.second, i.first)); - for (auto i = functionCalls_.rbegin(); i != functionCalls_.rend(); ++i) - printMsg(v, format("%1$10d %2%") % i->first % i->second->showNamePos()); - - printMsg(v, format("evaluations of %1% attributes:") % attrSelects.size()); - typedef std::multimap AttrSelects_; - AttrSelects_ attrSelects_; - for (auto & i : attrSelects) - attrSelects_.insert(std::pair(i.second, i.first)); - for (auto i = 
attrSelects_.rbegin(); i != attrSelects_.rend(); ++i) - printMsg(v, format("%1$10d %2%") % i->first % i->second); - + if (showStats) { + auto outPath = getEnv("NIX_SHOW_STATS_PATH","-"); + std::fstream fs; + if (outPath != "-") + fs.open(outPath, std::fstream::out); + JSONObject topObj(outPath == "-" ? std::cerr : fs, true); + topObj.attr("cpuTime",cpuTime); + { + auto envs = topObj.object("envs"); + envs.attr("number", nrEnvs); + envs.attr("elements", nrValuesInEnvs); + envs.attr("bytes", bEnvs); + } + { + auto lists = topObj.object("list"); + lists.attr("elements", nrListElems); + lists.attr("bytes", bLists); + lists.attr("concats", nrListConcats); + } + { + auto values = topObj.object("values"); + values.attr("number", nrValues); + values.attr("bytes", bValues); + } + { + auto syms = topObj.object("symbols"); + syms.attr("number", symbols.size()); + syms.attr("bytes", symbols.totalSize()); + } + { + auto sets = topObj.object("sets"); + sets.attr("number", nrAttrsets); + sets.attr("bytes", bAttrsets); + sets.attr("elements", nrAttrsInAttrsets); + } + { + auto sizes = topObj.object("sizes"); + sizes.attr("Env", sizeof(Env)); + sizes.attr("Value", sizeof(Value)); + sizes.attr("Bindings", sizeof(Bindings)); + sizes.attr("Attr", sizeof(Attr)); + } + topObj.attr("nrOpUpdates", nrOpUpdates); + topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied); + topObj.attr("nrThunks", nrThunks); + topObj.attr("nrAvoided", nrAvoided); + topObj.attr("nrLookups", nrLookups); + topObj.attr("nrPrimOpCalls", nrPrimOpCalls); + topObj.attr("nrFunctionCalls", nrFunctionCalls); +#if HAVE_BOEHMGC + { + auto gc = topObj.object("gc"); + gc.attr("heapSize", heapSize); + gc.attr("totalBytes", totalBytes); + } +#endif + if (countCalls) { + { + auto obj = topObj.object("primops"); + for (auto & i : primOpCalls) + obj.attr(i.first, i.second); + } + { + auto list = topObj.list("functions"); + for (auto & i : functionCalls) { + auto obj = list.object(); + if (i.first->name.set()) + obj.attr("name", (const string &) i.first->name); + else + obj.attr("name", nullptr); + if (i.first->pos) { + obj.attr("file", (const string &) i.first->pos.file); + obj.attr("line", i.first->pos.line); + obj.attr("column", i.first->pos.column); + } + obj.attr("count", i.second); + } + } + { + auto list = topObj.list("attributes"); + for (auto & i : attrSelects) { + auto obj = list.object(); + if (i.first) { + obj.attr("file", (const string &) i.first.file); + obj.attr("line", i.first.line); + obj.attr("column", i.first.column); + } + obj.attr("count", i.second); + } + } + } } } @@ -1810,7 +1881,7 @@ size_t valueSize(Value & v) if (seen.find(v.listElems()) == seen.end()) { seen.insert(v.listElems()); sz += v.listSize() * sizeof(Value *); - for (unsigned int n = 0; n < v.listSize(); ++n) + for (size_t n = 0; n < v.listSize(); ++n) sz += doValue(*v.listElems()[n]); } break; @@ -1846,9 +1917,10 @@ size_t valueSize(Value & v) size_t sz = sizeof(Env) + sizeof(Value *) * env.size; - for (unsigned int i = 0; i < env.size; ++i) - if (env.values[i]) - sz += doValue(*env.values[i]); + if (env.type != Env::HasWithExpr) + for (size_t i = 0; i < env.size; ++i) + if (env.values[i]) + sz += doValue(*env.values[i]); if (env.up) sz += doEnv(*env.up); @@ -1877,4 +1949,9 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) { } +EvalSettings evalSettings; + +static GlobalConfig::Register r1(&evalSettings); + + } diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 9d8799b7906..9fe3878916d 100644 --- a/src/libexpr/eval.hh +++ 
b/src/libexpr/eval.hh @@ -5,8 +5,10 @@ #include "nixexpr.hh" #include "symbol-table.hh" #include "hash.hh" +#include "config.hh" #include +#include namespace nix { @@ -34,8 +36,8 @@ struct Env { Env * up; unsigned short size; // used by ‘valueSize’ - unsigned short prevWith:15; // nr of levels up to next `with' environment - unsigned short haveWithAttrs:1; + unsigned short prevWith:14; // nr of levels up to next `with' environment + enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2; Value * values[0]; }; @@ -88,6 +90,14 @@ public: private: SrcToStore srcToStore; + /* A cache from path names to parse trees. */ +#if HAVE_BOEHMGC + typedef std::map, traceable_allocator > > FileParseCache; +#else + typedef std::map FileParseCache; +#endif + FileParseCache fileParseCache; + /* A cache from path names to values. */ #if HAVE_BOEHMGC typedef std::map, traceable_allocator > > FileEvalCache; @@ -100,6 +110,9 @@ private: std::map> searchPathResolved; + /* Cache used by checkSourcePath(). */ + std::unordered_map resolvedPaths; + public: EvalState(const Strings & _searchPath, ref store); @@ -214,7 +227,7 @@ private: Value * addConstant(const string & name, Value & v); Value * addPrimOp(const string & name, - unsigned int arity, PrimOpFun primOp); + size_t arity, PrimOpFun primOp); public: @@ -248,18 +261,18 @@ public: /* Allocation primitives. */ Value * allocValue(); - Env & allocEnv(unsigned int size); + Env & allocEnv(size_t size); Value * allocAttr(Value & vAttrs, const Symbol & name); - Bindings * allocBindings(Bindings::size_t capacity); + Bindings * allocBindings(size_t capacity); - void mkList(Value & v, unsigned int length); - void mkAttrs(Value & v, unsigned int capacity); + void mkList(Value & v, size_t length); + void mkAttrs(Value & v, size_t capacity); void mkThunk_(Value & v, Expr * expr); void mkPos(Value & v, Pos * pos); - void concatLists(Value & v, unsigned int nrLists, Value * * lists, const Pos & pos); + void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos); /* Print statistics. */ void printStats(); @@ -282,15 +295,15 @@ private: bool countCalls; - typedef std::map PrimOpCalls; + typedef std::map PrimOpCalls; PrimOpCalls primOpCalls; - typedef std::map FunctionCalls; + typedef std::map FunctionCalls; FunctionCalls functionCalls; void incrFunctionCall(ExprLambda * fun); - typedef std::map AttrSelects; + typedef std::map AttrSelects; AttrSelects attrSelects; friend struct ExprOpUpdate; @@ -303,6 +316,9 @@ private: /* Return a string representing the type of the value `v'. */ string showType(const Value & v); +/* Decode a context string ‘!!’ into a pair . */ +std::pair decodeContext(const string & s); /* If `path' refers to a directory, then append "/default.nix". 
*/ Path resolveExprPath(Path path); @@ -316,4 +332,25 @@ struct InvalidPathError : EvalError #endif }; +struct EvalSettings : Config +{ + Setting enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", + "Whether builtin functions that allow executing native code should be enabled."}; + + Setting restrictEval{this, false, "restrict-eval", + "Whether to restrict file system access to paths in $NIX_PATH, " + "and network access to the URI prefixes listed in 'allowed-uris'."}; + + Setting pureEval{this, false, "pure-eval", + "Whether to restrict file system and network access to files specified by cryptographic hash."}; + + Setting enableImportFromDerivation{this, true, "allow-import-from-derivation", + "Whether the evaluator allows importing the result of a derivation."}; + + Setting allowedUris{this, {}, "allowed-uris", + "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; +}; + +extern EvalSettings evalSettings; + } diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index 4d9128e3f44..daaa635fe1b 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -44,7 +44,7 @@ public: string queryDrvPath() const; string queryOutPath() const; string queryOutputName() const; - /** Return the list of outputs. The "outputs to install" are determined by `mesa.outputsToInstall`. */ + /** Return the list of outputs. The "outputs to install" are determined by `meta.outputsToInstall`. */ Outputs queryOutputs(bool onlyOutputsToInstall = false); StringSet queryMetaNames(); diff --git a/src/libexpr/json-to-value.cc b/src/libexpr/json-to-value.cc index 8b140459554..3f601795778 100644 --- a/src/libexpr/json-to-value.cc +++ b/src/libexpr/json-to-value.cc @@ -110,7 +110,7 @@ static void parseJSON(EvalState & state, const char * & s, Value & v) if (number_type == tFloat) mkFloat(v, stod(tmp_number)); else - mkInt(v, stoi(tmp_number)); + mkInt(v, stol(tmp_number)); } catch (std::invalid_argument e) { throw JSONParseError("invalid JSON number"); } catch (std::out_of_range e) { diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index 1e9c29afa13..c34e5c38392 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -6,12 +6,14 @@ %option nounput noyy_top_state +%s DEFAULT %x STRING %x IND_STRING -%x INSIDE_DOLLAR_CURLY %{ +#include + #include "nixexpr.hh" #include "parser-tab.hh" @@ -97,8 +99,6 @@ URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~ %% -{ - if { return IF; } then { return THEN; } @@ -124,9 +124,11 @@ or { return OR_KW; } {ID} { yylval->id = strdup(yytext); return ID; } {INT} { errno = 0; - yylval->n = strtol(yytext, 0, 10); - if (errno != 0) + try { + yylval->n = boost::lexical_cast(yytext); + } catch (const boost::bad_lexical_cast &) { throw ParseError(format("invalid integer '%1%'") % yytext); + } return INT; } {FLOAT} { errno = 0; @@ -136,17 +138,19 @@ or { return OR_KW; } return FLOAT; } -\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } -} +\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } -\} { return '}'; } -\} { POP_STATE(); return '}'; } -\{ { return '{'; } -\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return '{'; } +\} { /* State INITIAL only exists at the bottom of the stack and is + used as a marker. DEFAULT replaces it everywhere else. 
+ Popping when in INITIAL state causes an empty stack exception, + so don't */ + if (YYSTATE != INITIAL) + POP_STATE(); + return '}'; + } +\{ { PUSH_STATE(DEFAULT); return '{'; } -\" { - PUSH_STATE(STRING); return '"'; - } +\" { PUSH_STATE(STRING); return '"'; } ([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})*\$/\" | ([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})+ { /* It is impossible to match strings ending with '$' with one @@ -155,7 +159,7 @@ or { return OR_KW; } yylval->e = unescapeStr(data->symbols, yytext, yyleng); return STR; } -\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } +\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } \" { POP_STATE(); return '"'; } \$|\\|\$\\ { /* This can only occur when we reach EOF, otherwise the above @@ -165,7 +169,7 @@ or { return OR_KW; } return STR; } -\'\'(\ *\n)? { PUSH_STATE(IND_STRING); return IND_STRING_OPEN; } +\'\'(\ *\n)? { PUSH_STATE(IND_STRING); return IND_STRING_OPEN; } ([^\$\']|\$[^\{\']|\'[^\'\$])+ { yylval->e = new ExprIndStr(yytext); return IND_STR; @@ -183,14 +187,13 @@ or { return OR_KW; } yylval->e = unescapeStr(data->symbols, yytext + 2, yyleng - 2); return IND_STR; } -\$\{ { PUSH_STATE(INSIDE_DOLLAR_CURLY); return DOLLAR_CURLY; } +\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; } \'\' { POP_STATE(); return IND_STRING_CLOSE; } \' { yylval->e = new ExprIndStr("'"); return IND_STR; } -{ {PATH} { if (yytext[yyleng-1] == '/') throw ParseError("path '%s' has a trailing slash", yytext); @@ -209,11 +212,11 @@ or { return OR_KW; } \#[^\r\n]* /* single-line comments */ \/\*([^*]|\*+[^*/])*\*+\/ /* long comments */ -{ANY} return yytext[0]; - -} - -<> { data->atEnd = true; return 0; } +{ANY} { + /* Don't return a negative number, as this will cause + Bison to stop parsing without an error. */ + return (unsigned char) yytext[0]; + } %% diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index daa3258f0d3..ccd5293e4e5 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -6,7 +6,7 @@ libexpr_DIR := $(d) libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc -libexpr_LIBS = libutil libstore libformat +libexpr_LIBS = libutil libstore libexpr_LDFLAGS = ifneq ($(OS), FreeBSD) diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh index 8c8f3964068..665a42987dc 100644 --- a/src/libexpr/nixexpr.hh +++ b/src/libexpr/nixexpr.hh @@ -11,7 +11,6 @@ namespace nix { MakeError(EvalError, Error) MakeError(ParseError, Error) -MakeError(IncompleteParseError, ParseError) MakeError(AssertionError, EvalError) MakeError(ThrownError, AssertionError) MakeError(Abort, EvalError) @@ -255,7 +254,7 @@ struct ExprWith : Expr { Pos pos; Expr * attrs, * body; - unsigned int prevWith; + size_t prevWith; ExprWith(const Pos & pos, Expr * attrs, Expr * body) : pos(pos), attrs(attrs), body(body) { }; COMMON_METHODS }; diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index ef11dd60921..cbd576d7d12 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -31,12 +31,10 @@ namespace nix { Path basePath; Symbol path; string error; - bool atEnd; Symbol sLetBody; ParseData(EvalState & state) : state(state) , symbols(state.symbols) - , atEnd(false) , sLetBody(symbols.create("")) { }; }; @@ -136,8 +134,8 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector(i); if (!e) { @@ -148,7 +146,7 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vectors.size(); ++j) { + for (size_t j = 0; j < e->s.size(); ++j) { if (atStartOfLine) { if (e->s[j] == ' 
') curIndent++; @@ -170,8 +168,8 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector * es2 = new vector; atStartOfLine = true; - unsigned int curDropped = 0; - unsigned int n = es.size(); + size_t curDropped = 0; + size_t n = es.size(); for (vector::iterator i = es.begin(); i != es.end(); ++i, --n) { ExprIndStr * e = dynamic_cast(*i); if (!e) { @@ -182,7 +180,7 @@ static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vectors.size(); ++j) { + for (size_t j = 0; j < e->s.size(); ++j) { if (atStartOfLine) { if (e->s[j] == ' ') { if (curDropped++ >= minIndent) @@ -275,11 +273,11 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err %token IND_STRING_OPEN IND_STRING_CLOSE %token ELLIPSIS -%nonassoc IMPL +%right IMPL %left OR %left AND %nonassoc EQ NEQ -%left '<' '>' LEQ GEQ +%nonassoc '<' '>' LEQ GEQ %right UPDATE %left NOT %left '+' '-' @@ -541,12 +539,7 @@ Expr * EvalState::parse(const char * text, int res = yyparse(scanner, &data); yylex_destroy(scanner); - if (res) { - if (data.atEnd) - throw IncompleteParseError(data.error); - else - throw ParseError(data.error); - } + if (res) throw ParseError(data.error); data.result->bindVars(staticEnv); diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 57dc7bd1279..0da9f702f4b 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -73,7 +73,7 @@ void EvalState::realiseContext(const PathSet & context) if (drvs.empty()) return; - if (!settings.enableImportFromDerivation) + if (!evalSettings.enableImportFromDerivation) throw EvalError(format("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin())); /* For performance, prefetch all substitute info. */ @@ -464,7 +464,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Value & v) { string name = state.forceStringNoCtx(*args[0], pos); - mkString(v, settings.restrictEval || settings.pureEval ? "" : getEnv(name)); + mkString(v, evalSettings.restrictEval || evalSettings.pureEval ? "" : getEnv(name)); } @@ -687,21 +687,12 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * } } - /* See prim_unsafeDiscardOutputDependency. */ - else if (path.at(0) == '~') - drv.inputSrcs.insert(string(path, 1)); - /* Handle derivation outputs of the form ‘!!’. */ else if (path.at(0) == '!') { std::pair ctx = decodeContext(path); drv.inputDrvs[ctx.first].insert(ctx.second); } - /* Handle derivation contexts returned by - ‘builtins.storePath’. */ - else if (isDerivation(path)) - drv.inputDrvs[path] = state.store->queryDerivationOutputNames(path); - /* Otherwise it's a source file. */ else drv.inputSrcs.insert(path); @@ -724,16 +715,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * * if (outputs.size() != 1 || *(outputs.begin()) != "out") throw Error(format("multiple outputs are not supported in fixed-output derivations, at %1%") % posDrvName); - HashType ht = parseHashType(outputHashAlgo); - if (ht == htUnknown) - throw EvalError(format("unknown hash algorithm '%1%', at %2%") % outputHashAlgo % posDrvName); + HashType ht = outputHashAlgo.empty() ? 
htUnknown : parseHashType(outputHashAlgo); Hash h(*outputHash, ht); - outputHash = h.to_string(Base16, false); - if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo; Path outPath = state.store->makeFixedOutputPath(outputHashRecursive, h, drvName); if (!jsonObject) drv.env["out"] = outPath; - drv.outputs["out"] = DerivationOutput(outPath, outputHashAlgo, *outputHash); + drv.outputs["out"] = DerivationOutput(outPath, + (outputHashRecursive ? "r:" : "") + printHashType(h.type), + h.to_string(Base16, false)); } else { @@ -866,7 +855,7 @@ static void prim_baseNameOf(EvalState & state, const Pos & pos, Value * * args, static void prim_dirOf(EvalState & state, const Pos & pos, Value * * args, Value & v) { PathSet context; - Path dir = dirOf(state.coerceToPath(pos, *args[0], context)); + Path dir = dirOf(state.coerceToString(pos, *args[0], context, false, false)); if (args[0]->type == tPath) mkPath(v, dir.c_str()); else mkString(v, dir, context); } @@ -1006,13 +995,8 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu PathSet refs; for (auto path : context) { - if (path.at(0) == '=') path = string(path, 1); - if (isDerivation(path)) { - /* See prim_unsafeDiscardOutputDependency. */ - if (path.at(0) != '~') - throw EvalError(format("in 'toFile': the file '%1%' cannot refer to derivation outputs, at %2%") % name % pos); - path = string(path, 1); - } + if (path.at(0) != '/') + throw EvalError(format("in 'toFile': the file '%1%' cannot refer to derivation outputs, at %2%") % name % pos); refs.insert(path); } @@ -1031,7 +1015,7 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_, Value * filterFun, bool recursive, const Hash & expectedHash, Value & v) { - const auto path = settings.pureEval && expectedHash ? + const auto path = evalSettings.pureEval && expectedHash ? path_ : state.checkSourcePath(path_); PathFilter filter = filterFun ? ([&](const Path & path) { @@ -1356,6 +1340,24 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args } +/* Apply a function to every element of an attribute set. */ +static void prim_mapAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceAttrs(*args[1], pos); + + state.mkAttrs(v, args[1]->attrs->size()); + + for (auto & i : *args[1]->attrs) { + Value * vName = state.allocValue(); + Value * vFun2 = state.allocValue(); + mkString(*vName, i.name); + mkApp(*vFun2, *args[0], *vName); + mkApp(*state.allocAttr(v, i.name), *vFun2, *i.value); + } +} + + + /************************************************************* * Lists *************************************************************/ @@ -1410,7 +1412,6 @@ static void prim_tail(EvalState & state, const Pos & pos, Value * * args, Value /* Apply a function to every element of a list. 
*/ static void prim_map(EvalState & state, const Pos & pos, Value * * args, Value & v) { - state.forceFunction(*args[0], pos); state.forceList(*args[1], pos); state.mkList(v, args[1]->listSize()); @@ -1489,19 +1490,20 @@ static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args, state.forceFunction(*args[0], pos); state.forceList(*args[2], pos); - Value * vCur = args[1]; + if (args[2]->listSize()) { + Value * vCur = args[1]; - if (args[2]->listSize()) for (unsigned int n = 0; n < args[2]->listSize(); ++n) { Value vTmp; state.callFunction(*args[0], *vCur, vTmp, pos); vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue(); state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos); } - else - v = *vCur; - - state.forceValue(v); + state.forceValue(v); + } else { + state.forceValue(*args[1]); + v = *args[1]; + } } @@ -1538,7 +1540,6 @@ static void prim_all(EvalState & state, const Pos & pos, Value * * args, Value & static void prim_genList(EvalState & state, const Pos & pos, Value * * args, Value & v) { - state.forceFunction(*args[0], pos); auto len = state.forceInt(*args[1], pos); if (len < 0) @@ -1627,6 +1628,35 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V } +/* concatMap = f: list: concatLists (map f list); */ +/* C++-version is to avoid allocating `mkApp', call `f' eagerly */ +static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + state.forceFunction(*args[0], pos); + state.forceList(*args[1], pos); + auto nrLists = args[1]->listSize(); + + Value lists[nrLists]; + size_t len = 0; + + for (unsigned int n = 0; n < nrLists; ++n) { + Value * vElem = args[1]->listElems()[n]; + state.callFunction(*args[0], *vElem, lists[n], pos); + state.forceList(lists[n], pos); + len += lists[n].listSize(); + } + + state.mkList(v, len); + auto out = v.listElems(); + for (unsigned int n = 0, pos = 0; n < nrLists; ++n) { + auto l = lists[n].listSize(); + if (l) + memcpy(out + pos, lists[n].listElems(), l * sizeof(Value *)); + pos += l; + } +} + + /************************************************************* * Integer arithmetic *************************************************************/ @@ -1634,6 +1664,8 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & v) { + state.forceValue(*args[0], pos); + state.forceValue(*args[1], pos); if (args[0]->type == tFloat || args[1]->type == tFloat) mkFloat(v, state.forceFloat(*args[0], pos) + state.forceFloat(*args[1], pos)); else @@ -1643,6 +1675,8 @@ static void prim_add(EvalState & state, const Pos & pos, Value * * args, Value & static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & v) { + state.forceValue(*args[0], pos); + state.forceValue(*args[1], pos); if (args[0]->type == tFloat || args[1]->type == tFloat) mkFloat(v, state.forceFloat(*args[0], pos) - state.forceFloat(*args[1], pos)); else @@ -1652,6 +1686,8 @@ static void prim_sub(EvalState & state, const Pos & pos, Value * * args, Value & static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & v) { + state.forceValue(*args[0], pos); + state.forceValue(*args[1], pos); if (args[0]->type == tFloat || args[1]->type == tFloat) mkFloat(v, state.forceFloat(*args[0], pos) * state.forceFloat(*args[1], pos)); else @@ -1661,6 +1697,9 @@ static void prim_mul(EvalState & state, const Pos & pos, Value * * args, Value & static void prim_div(EvalState & 
state, const Pos & pos, Value * * args, Value & v) { + state.forceValue(*args[0], pos); + state.forceValue(*args[1], pos); + NixFloat f2 = state.forceFloat(*args[1], pos); if (f2 == 0) throw EvalError(format("division by zero, at %1%") % pos); @@ -1676,6 +1715,20 @@ static void prim_div(EvalState & state, const Pos & pos, Value * * args, Value & } } +static void prim_bitAnd(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) & state.forceInt(*args[1], pos)); +} + +static void prim_bitOr(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) | state.forceInt(*args[1], pos)); +} + +static void prim_bitXor(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + mkInt(v, state.forceInt(*args[0], pos) ^ state.forceInt(*args[1], pos)); +} static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v) { @@ -1727,41 +1780,6 @@ static void prim_stringLength(EvalState & state, const Pos & pos, Value * * args } -static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v) -{ - PathSet context; - string s = state.coerceToString(pos, *args[0], context); - mkString(v, s, PathSet()); -} - - -static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v) -{ - PathSet context; - state.forceString(*args[0], context, pos); - mkBool(v, !context.empty()); -} - - -/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a - builder without causing the derivation to be built (for instance, - in the derivation that builds NARs in nix-push, when doing - source-only deployment). This primop marks the string context so - that builtins.derivation adds the path to drv.inputSrcs rather than - drv.inputDrvs. */ -static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v) -{ - PathSet context; - string s = state.coerceToString(pos, *args[0], context); - - PathSet context2; - for (auto & p : context) - context2.insert(p.at(0) == '=' ? "~" + string(p, 1) : p); - - mkString(v, s, context2); -} - - /* Return the cryptographic hash of a string in base-16. 
*/ static void prim_hashString(EvalState & state, const Pos & pos, Value * * args, Value & v) { @@ -2042,7 +2060,7 @@ void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v, state.checkURI(url); - if (settings.pureEval && !expectedHash) + if (evalSettings.pureEval && !expectedHash) throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who); Path res = getDownloader()->downloadCached(state.store, url, unpack, name, expectedHash); @@ -2110,12 +2128,12 @@ void EvalState::createBaseEnv() addConstant(name, v); }; - if (!settings.pureEval) { + if (!evalSettings.pureEval) { mkInt(v, time(0)); addConstant("__currentTime", v); } - if (!settings.pureEval) { + if (!evalSettings.pureEval) { mkString(v, settings.thisSystem); addConstant("__currentSystem", v); } @@ -2140,7 +2158,7 @@ void EvalState::createBaseEnv() mkApp(v, *vScopedImport, *v2); forceValue(v); addConstant("import", v); - if (settings.enableNativeCode) { + if (evalSettings.enableNativeCode) { addPrimOp("__importNative", 2, prim_importNative); addPrimOp("__exec", 1, prim_exec); } @@ -2167,7 +2185,7 @@ void EvalState::createBaseEnv() // Paths addPrimOp("__toPath", 1, prim_toPath); - if (settings.pureEval) + if (evalSettings.pureEval) addPurityError("__storePath"); else addPrimOp("__storePath", 1, prim_storePath); @@ -2198,6 +2216,7 @@ void EvalState::createBaseEnv() addPrimOp("__intersectAttrs", 2, prim_intersectAttrs); addPrimOp("__catAttrs", 2, prim_catAttrs); addPrimOp("__functionArgs", 1, prim_functionArgs); + addPrimOp("__mapAttrs", 2, prim_mapAttrs); // Lists addPrimOp("__isList", 1, prim_isList); @@ -2215,21 +2234,22 @@ void EvalState::createBaseEnv() addPrimOp("__genList", 2, prim_genList); addPrimOp("__sort", 2, prim_sort); addPrimOp("__partition", 2, prim_partition); + addPrimOp("__concatMap", 2, prim_concatMap); // Integer arithmetic addPrimOp("__add", 2, prim_add); addPrimOp("__sub", 2, prim_sub); addPrimOp("__mul", 2, prim_mul); addPrimOp("__div", 2, prim_div); + addPrimOp("__bitAnd", 2, prim_bitAnd); + addPrimOp("__bitOr", 2, prim_bitOr); + addPrimOp("__bitXor", 2, prim_bitXor); addPrimOp("__lessThan", 2, prim_lessThan); // String manipulation addPrimOp("toString", 1, prim_toString); addPrimOp("__substring", 3, prim_substring); addPrimOp("__stringLength", 1, prim_stringLength); - addPrimOp("__hasContext", 1, prim_hasContext); - addPrimOp("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); - addPrimOp("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); addPrimOp("__hashString", 2, prim_hashString); addPrimOp("__match", 2, prim_match); addPrimOp("__split", 2, prim_split); diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc new file mode 100644 index 00000000000..2d79739ea04 --- /dev/null +++ b/src/libexpr/primops/context.cc @@ -0,0 +1,187 @@ +#include "primops.hh" +#include "eval-inline.hh" +#include "derivations.hh" + +namespace nix { + +static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + string s = state.coerceToString(pos, *args[0], context); + mkString(v, s, PathSet()); +} + +static RegisterPrimOp r1("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext); + + +static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + state.forceString(*args[0], context, pos); + mkBool(v, !context.empty()); +} + +static RegisterPrimOp r2("__hasContext", 1, prim_hasContext); + + +/* Sometimes we 
want to pass a derivation path (i.e. pkg.drvPath) to a + builder without causing the derivation to be built (for instance, + in the derivation that builds NARs in nix-push, when doing + source-only deployment). This primop marks the string context so + that builtins.derivation adds the path to drv.inputSrcs rather than + drv.inputDrvs. */ +static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + string s = state.coerceToString(pos, *args[0], context); + + PathSet context2; + for (auto & p : context) + context2.insert(p.at(0) == '=' ? string(p, 1) : p); + + mkString(v, s, context2); +} + +static RegisterPrimOp r3("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency); + + +/* Extract the context of a string as a structured Nix value. + + The context is represented as an attribute set whose keys are the + paths in the context set and whose values are attribute sets with + the following keys: + path: True if the relevant path is in the context as a plain store + path (i.e. the kind of context you get when interpolating + a Nix path (e.g. ./.) into a string). False if missing. + allOutputs: True if the relevant path is a derivation and it is + in the context as a drv file with all of its outputs + (i.e. the kind of context you get when referencing + .drvPath of some derivation). False if missing. + outputs: If a non-empty list, the relevant path is a derivation + and the provided outputs are referenced in the context + (i.e. the kind of context you get when referencing + .outPath of some derivation). Empty list if missing. + Note that for a given path any combination of the above attributes + may be present. +*/ +static void prim_getContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + struct ContextInfo { + bool path = false; + bool allOutputs = false; + Strings outputs; + }; + PathSet context; + state.forceString(*args[0], context, pos); + auto contextInfos = std::map(); + for (const auto & p : context) { + Path drv; + string output; + const Path * path = &p; + if (p.at(0) == '=') { + drv = string(p, 1); + path = &drv; + } else if (p.at(0) == '!') { + std::pair ctx = decodeContext(p); + drv = ctx.first; + output = ctx.second; + path = &drv; + } + auto isPath = drv.empty(); + auto isAllOutputs = (!drv.empty()) && output.empty(); + + auto iter = contextInfos.find(*path); + if (iter == contextInfos.end()) { + contextInfos.emplace(*path, ContextInfo{isPath, isAllOutputs, output.empty() ? 
Strings{} : Strings{std::move(output)}}); + } else { + if (isPath) + iter->second.path = true; + else if (isAllOutputs) + iter->second.allOutputs = true; + else + iter->second.outputs.emplace_back(std::move(output)); + } + } + + state.mkAttrs(v, contextInfos.size()); + + auto sPath = state.symbols.create("path"); + auto sAllOutputs = state.symbols.create("allOutputs"); + for (const auto & info : contextInfos) { + auto & infoVal = *state.allocAttr(v, state.symbols.create(info.first)); + state.mkAttrs(infoVal, 3); + if (info.second.path) + mkBool(*state.allocAttr(infoVal, sPath), true); + if (info.second.allOutputs) + mkBool(*state.allocAttr(infoVal, sAllOutputs), true); + if (!info.second.outputs.empty()) { + auto & outputsVal = *state.allocAttr(infoVal, state.sOutputs); + state.mkList(outputsVal, info.second.outputs.size()); + size_t i = 0; + for (const auto & output : info.second.outputs) { + mkString(*(outputsVal.listElems()[i++] = state.allocValue()), output); + } + } + infoVal.attrs->sort(); + } + v.attrs->sort(); +} + +static RegisterPrimOp r4("__getContext", 1, prim_getContext); + + +/* Append the given context to a given string. + + See the commentary above unsafeGetContext for details of the + context representation. +*/ +static void prim_appendContext(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + PathSet context; + auto orig = state.forceString(*args[0], context, pos); + + state.forceAttrs(*args[1], pos); + + auto sPath = state.symbols.create("path"); + auto sAllOutputs = state.symbols.create("allOutputs"); + for (auto & i : *args[1]->attrs) { + if (!state.store->isStorePath(i.name)) + throw EvalError("Context key '%s' is not a store path, at %s", i.name, i.pos); + if (!settings.readOnlyMode) + state.store->ensurePath(i.name); + state.forceAttrs(*i.value, *i.pos); + auto iter = i.value->attrs->find(sPath); + if (iter != i.value->attrs->end()) { + if (state.forceBool(*iter->value, *iter->pos)) + context.insert(i.name); + } + + iter = i.value->attrs->find(sAllOutputs); + if (iter != i.value->attrs->end()) { + if (state.forceBool(*iter->value, *iter->pos)) { + if (!isDerivation(i.name)) { + throw EvalError("Tried to add all-outputs context of %s, which is not a derivation, to a string, at %s", i.name, i.pos); + } + context.insert("=" + string(i.name)); + } + } + + iter = i.value->attrs->find(state.sOutputs); + if (iter != i.value->attrs->end()) { + state.forceList(*iter->value, *iter->pos); + if (iter->value->listSize() && !isDerivation(i.name)) { + throw EvalError("Tried to add derivation output context of %s, which is not a derivation, to a string, at %s", i.name, i.pos); + } + for (unsigned int n = 0; n < iter->value->listSize(); ++n) { + auto name = state.forceStringNoCtx(*iter->value->listElems()[n], *iter->pos); + context.insert("!" + name + "!" 
+ string(i.name)); + } + } + } + + mkString(v, orig, context); +} + +static RegisterPrimOp r5("__appendContext", 2, prim_appendContext); + +} diff --git a/src/libexpr/primops/fetchGit.cc b/src/libexpr/primops/fetchGit.cc index 8bb74dad639..b46d2f25826 100644 --- a/src/libexpr/primops/fetchGit.cc +++ b/src/libexpr/primops/fetchGit.cc @@ -3,6 +3,7 @@ #include "download.hh" #include "store-api.hh" #include "pathlocks.hh" +#include "hash.hh" #include @@ -28,7 +29,7 @@ GitInfo exportGit(ref store, const std::string & uri, std::experimental::optional ref, std::string rev, const std::string & name) { - if (settings.pureEval && rev == "") + if (evalSettings.pureEval && rev == "") throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision"); if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) { @@ -84,15 +85,16 @@ GitInfo exportGit(ref store, const std::string & uri, if (rev != "" && !std::regex_match(rev, revRegex)) throw Error("invalid Git revision '%s'", rev); - Path cacheDir = getCacheDir() + "/nix/git"; + deletePath(getCacheDir() + "/nix/git"); + + Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false); if (!pathExists(cacheDir)) { + createDirs(dirOf(cacheDir)); runProgram("git", true, { "init", "--bare", cacheDir }); } - std::string localRef = hashString(htSHA256, fmt("%s-%s", uri, *ref)).to_string(Base32, false); - - Path localRefFile = cacheDir + "/refs/heads/" + localRef; + Path localRefFile = cacheDir + "/refs/heads/" + *ref; bool doFetch; time_t now = time(0); @@ -122,7 +124,7 @@ GitInfo exportGit(ref store, const std::string & uri, // FIXME: git stderr messes up our progress indicator, so // we're using --quiet for now. Should process its stderr. - runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, *ref + ":" + localRef }); + runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) }); struct timeval times[2]; times[0].tv_sec = now; @@ -219,8 +221,6 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va } else url = state.coerceToString(pos, *args[0], context, false, false); - if (!isUri(url)) url = absPath(url); - // FIXME: git externals probably can be used to bypass the URI // whitelist. Ah well. 
state.checkURI(url); diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index a75c5fc2ddf..66f49f37432 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -27,7 +27,7 @@ std::regex commitHashRegex("^[0-9a-fA-F]{40}$"); HgInfo exportMercurial(ref store, const std::string & uri, std::string rev, const std::string & name) { - if (settings.pureEval && rev == "") + if (evalSettings.pureEval && rev == "") throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision"); if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) { @@ -93,7 +93,22 @@ HgInfo exportMercurial(ref store, const std::string & uri, Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", uri)); if (pathExists(cacheDir)) { - runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri }); + try { + runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri }); + } + catch (ExecError & e){ + string transJournal = cacheDir + "/.hg/store/journal"; + /* hg throws "abandoned transaction" error only if this file exists */ + if (pathExists(transJournal)) + { + runProgram("hg", true, { "recover", "-R", cacheDir }); + runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri }); + } + else + { + throw ExecError(e.status, fmt("program hg '%1%' ", statusToString(e.status))); + } + } } else { createDirs(dirOf(cacheDir)); runProgram("hg", true, { "clone", "--noupdate", "--", uri, cacheDir }); @@ -184,8 +199,6 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar } else url = state.coerceToString(pos, *args[0], context, false, false); - if (!isUri(url)) url = absPath(url); - // FIXME: git externals probably can be used to bypass the URI // whitelist. Ah well. 
state.checkURI(url); diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc new file mode 100644 index 00000000000..4128de05d0c --- /dev/null +++ b/src/libexpr/primops/fromTOML.cc @@ -0,0 +1,77 @@ +#include "primops.hh" +#include "eval-inline.hh" + +#include "cpptoml/cpptoml.h" + +namespace nix { + +static void prim_fromTOML(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + using namespace cpptoml; + + auto toml = state.forceStringNoCtx(*args[0], pos); + + std::istringstream tomlStream(toml); + + std::function)> visit; + + visit = [&](Value & v, std::shared_ptr t) { + + if (auto t2 = t->as_table()) { + + size_t size = 0; + for (auto & i : *t2) { (void) i; size++; } + + state.mkAttrs(v, size); + + for (auto & i : *t2) { + auto & v2 = *state.allocAttr(v, state.symbols.create(i.first)); + + if (auto i2 = i.second->as_table_array()) { + size_t size2 = i2->get().size(); + state.mkList(v2, size2); + for (size_t j = 0; j < size2; ++j) + visit(*(v2.listElems()[j] = state.allocValue()), i2->get()[j]); + } + else + visit(v2, i.second); + } + + v.attrs->sort(); + } + + else if (auto t2 = t->as_array()) { + size_t size = t2->get().size(); + + state.mkList(v, size); + + for (size_t i = 0; i < size; ++i) + visit(*(v.listElems()[i] = state.allocValue()), t2->get()[i]); + } + + else if (t->is_value()) { + if (auto val = t->as()) + mkInt(v, val->get()); + else if (auto val = t->as()) + mkFloat(v, val->get()); + else if (auto val = t->as()) + mkBool(v, val->get()); + else if (auto val = t->as()) + mkString(v, val->get()); + else + throw EvalError("unsupported value type in TOML"); + } + + else abort(); + }; + + try { + visit(v, parser(tomlStream).parse()); + } catch (std::runtime_error & e) { + throw EvalError("while parsing a TOML string at %s: %s", pos, e.what()); + } +} + +static RegisterPrimOp r("fromTOML", 1, prim_fromTOML); + +} diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh index c2ee49dd32f..44929f7eea0 100644 --- a/src/libexpr/symbol-table.hh +++ b/src/libexpr/symbol-table.hh @@ -69,7 +69,7 @@ public: return Symbol(&*res.first); } - unsigned int size() const + size_t size() const { return symbols.size(); } diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 9df516f062e..e1ec87d3b84 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -43,8 +43,8 @@ class XMLWriter; class JSONPlaceholder; -typedef long NixInt; -typedef float NixFloat; +typedef int64_t NixInt; +typedef double NixFloat; /* External values must descend from ExternalValueBase, so that * type-agnostic nix functions (e.g. showType) can be implemented @@ -128,7 +128,7 @@ struct Value const char * path; Bindings * attrs; struct { - unsigned int size; + size_t size; Value * * elems; } bigList; Value * smallList[2]; @@ -166,7 +166,7 @@ struct Value return type == tList1 || type == tList2 ? smallList : bigList.elems; } - unsigned int listSize() const + size_t listSize() const { return type == tList1 ? 1 : type == tList2 ? 
2 : bigList.size; } diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index bcc05c2cdad..4c35a419959 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -29,14 +29,14 @@ MixCommonArgs::MixCommonArgs(const string & programName) .arity(2) .handler([](std::vector ss) { try { - settings.set(ss[0], ss[1]); + globalConfig.set(ss[0], ss[1]); } catch (UsageError & e) { warn(e.what()); } }); std::string cat = "config"; - settings.convertToArgs(*this, cat); + globalConfig.convertToArgs(*this, cat); // Backward compatibility hack: nix-env already had a --system flag. if (programName == "nix-env") longFlags.erase("system"); diff --git a/src/libmain/local.mk b/src/libmain/local.mk index f1fd3eb7242..0c80f5a0a03 100644 --- a/src/libmain/local.mk +++ b/src/libmain/local.mk @@ -8,7 +8,7 @@ libmain_SOURCES := $(wildcard $(d)/*.cc) libmain_LDFLAGS = $(OPENSSL_LIBS) -libmain_LIBS = libstore libutil libformat +libmain_LIBS = libstore libutil libmain_ALLOW_UNDEFINED = 1 diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 91a4eaf922a..4ed34e54dc5 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -109,7 +109,7 @@ void initNix() opensslLocks = std::vector(CRYPTO_num_locks()); CRYPTO_set_locking_callback(opensslLockCallback); - settings.loadConfFile(); + loadConfFile(); startSignalHandlerThread(); diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc index cc0eea68fca..e6224de7d28 100644 --- a/src/libmain/stack.cc +++ b/src/libmain/stack.cc @@ -30,7 +30,7 @@ static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) if (diff < 0) diff = -diff; if (diff < 4096) { char msg[] = "error: stack overflow (possible infinite recursion)\n"; - [[gnu::unused]] int res = write(2, msg, strlen(msg)); + [[gnu::unused]] auto res = write(2, msg, strlen(msg)); _exit(1); // maybe abort instead? } } @@ -63,7 +63,7 @@ void detectStackOverflow() act.sa_sigaction = sigsegvHandler; act.sa_flags = SA_SIGINFO | SA_ONSTACK; if (sigaction(SIGSEGV, &act, 0)) - throw SysError("resetting SIGCHLD"); + throw SysError("resetting SIGSEGV"); #endif } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 2e9a13e564c..4527ee6ba66 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -54,17 +54,38 @@ void BinaryCacheStore::init() } } -std::shared_ptr BinaryCacheStore::getFile(const std::string & path) +void BinaryCacheStore::getFile(const std::string & path, + Callback> callback) +{ + try { + callback(getFile(path)); + } catch (...) { callback.rethrow(); } +} + +void BinaryCacheStore::getFile(const std::string & path, Sink & sink) { std::promise> promise; getFile(path, - [&](std::shared_ptr result) { - promise.set_value(result); - }, - [&](std::exception_ptr exc) { - promise.set_exception(exc); - }); - return promise.get_future().get(); + {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + }}); + auto data = promise.get_future().get(); + sink((unsigned char *) data->data(), data->size()); +} + +std::shared_ptr BinaryCacheStore::getFile(const std::string & path) +{ + StringSink sink; + try { + getFile(path, sink); + } catch (NoSuchBinaryCacheFile &) { + return nullptr; + } + return sink.s; } Path BinaryCacheStore::narInfoFileFor(const Path & storePath) @@ -196,30 +217,30 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink) { auto info = queryPathInfo(storePath).cast(); - auto nar = getFile(info->url); - - if (!nar) throw Error(format("file '%s' missing from binary cache") % info->url); - - stats.narRead++; - stats.narReadCompressedBytes += nar->size(); - uint64_t narSize = 0; - StringSource source(*nar); - LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { sink(data, len); narSize += len; }); - decompress(info->compression, source, wrapperSink); + auto decompressor = makeDecompressionSink(info->compression, wrapperSink); + try { + getFile(info->url, *decompressor); + } catch (NoSuchBinaryCacheFile & e) { + throw SubstituteGone(e.what()); + } + + decompressor->finish(); + + stats.narRead++; + //stats.narReadCompressedBytes += nar->size(); // FIXME stats.narReadBytes += narSize; } void BinaryCacheStore::queryPathInfoUncached(const Path & storePath, - std::function)> success, - std::function failure) + Callback> callback) { auto uri = getUri(); auto act = std::make_shared(*logger, lvlTalkative, actQueryPathInfo, @@ -229,17 +250,22 @@ void BinaryCacheStore::queryPathInfoUncached(const Path & storePath, auto narInfoFile = narInfoFileFor(storePath); getFile(narInfoFile, - [=](std::shared_ptr data) { - if (!data) return success(0); + {[=](std::future> fut) { + try { + auto data = fut.get(); - stats.narInfoRead++; + if (!data) return callback(nullptr); - callSuccess(success, failure, (std::shared_ptr) - std::make_shared(*this, *data, narInfoFile)); + stats.narInfoRead++; - (void) act; // force Activity into this lambda to ensure it stays alive - }, - failure); + callback((std::shared_ptr) + std::make_shared(*this, *data, narInfoFile)); + + (void) act; // force Activity into this lambda to ensure it stays alive + } catch (...) { + callback.rethrow(); + } + }}); } Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath, diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index e20b968442b..953f3b90af4 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -38,11 +38,16 @@ public: const std::string & data, const std::string & mimeType) = 0; - /* Return the contents of the specified file, or null if it - doesn't exist. */ + /* Note: subclasses must implement at least one of the two + following getFile() methods. */ + + /* Dump the contents of the specified file to a sink. */ + virtual void getFile(const std::string & path, Sink & sink); + + /* Fetch the specified file and call the specified callback with + the result. A subclass may implement this asynchronously. 
*/ virtual void getFile(const std::string & path, - std::function)> success, - std::function failure) = 0; + Callback> callback); std::shared_ptr getFile(const std::string & path); @@ -67,25 +72,11 @@ public: bool isValidPathUncached(const Path & path) override; - PathSet queryAllValidPaths() override - { unsupported(); } - void queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) override; - - void queryReferrers(const Path & path, - PathSet & referrers) override - { unsupported(); } - - PathSet queryDerivationOutputs(const Path & path) override - { unsupported(); } - - StringSet queryDerivationOutputNames(const Path & path) override - { unsupported(); } + Callback> callback) override; Path queryPathFromHashPart(const string & hashPart) override - { unsupported(); } + { unsupported("queryPathFromHashPart"); } bool wantMassQuery() override { return wantMassQuery_; } @@ -104,22 +95,10 @@ public: BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) override - { unsupported(); } + { unsupported("buildDerivation"); } void ensurePath(const Path & path) override - { unsupported(); } - - void addTempRoot(const Path & path) override - { unsupported(); } - - void addIndirectRoot(const Path & path) override - { unsupported(); } - - Roots findRoots() override - { unsupported(); } - - void collectGarbage(const GCOptions & options, GCResults & results) override - { unsupported(); } + { unsupported("ensurePath"); } ref getFSAccessor() override; @@ -131,4 +110,6 @@ public: }; +MakeError(NoSuchBinaryCacheFile, Error); + } diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 6108785447a..47ee8b48f4b 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -11,6 +11,7 @@ #include "compression.hh" #include "json.hh" #include "nar-info.hh" +#include "parsed-derivations.hh" #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include @@ -29,7 +31,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -672,8 +676,10 @@ HookInstance::HookInstance() toHook.readSide = -1; sink = FdSink(toHook.writeSide.get()); - for (auto & setting : settings.getSettings()) - sink << 1 << setting.first << setting.second; + std::map settings; + globalConfig.getSettings(settings); + for (auto & setting : settings) + sink << 1 << setting.first << setting.second.value; sink << 0; } @@ -731,11 +737,13 @@ class DerivationGoal : public Goal /* Whether to retry substituting the outputs after building the inputs. */ - bool retrySubstitution = false; + bool retrySubstitution; /* The derivation stored at drvPath. */ std::unique_ptr drv; + std::unique_ptr parsedDrv; + /* The remainder is state held during the build. */ /* Locks on the output paths. */ @@ -842,15 +850,15 @@ class DerivationGoal : public Goal BuildResult result; /* The current round, if we're building multiple times. */ - unsigned int curRound = 1; + size_t curRound = 1; - unsigned int nrRounds; + size_t nrRounds; /* Path registration info from the previous round, if we're building multiple times. Since this contains the hash, it allows us to compare whether two rounds produced the same result. */ - ValidPathInfos prevInfos; + std::map prevInfos; const uid_t sandboxUid = 1000; const gid_t sandboxGid = 100; @@ -931,6 +939,11 @@ class DerivationGoal : public Goal as valid. 
*/ void registerOutputs(); + /* Check that an output meets the requirements specified by the + 'outputChecks' attribute (or the legacy + '{allowed,disallowed}{References,Requisites}' attributes). */ + void checkOutputs(const std::map & outputs); + /* Open a log file and a pipe to it. */ Path openLogFile(); @@ -1121,6 +1134,8 @@ void DerivationGoal::haveDerivation() { trace("have derivation"); + retrySubstitution = false; + for (auto & i : drv->outputs) worker.store.addTempRoot(i.second.path); @@ -1133,6 +1148,8 @@ void DerivationGoal::haveDerivation() return; } + parsedDrv = std::make_unique(drvPath, *drv); + /* We are first going to try to create the invalid output paths through substitutes. If that doesn't work, we'll build them. */ @@ -1159,7 +1176,7 @@ void DerivationGoal::outputsSubstituted() /* If the substitutes form an incomplete closure, then we should build the dependencies of this derivation, but after that, we can still use the substitutes for this derivation itself. */ - if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true; + if (nrIncompleteClosure > 0) retrySubstitution = true; nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; @@ -1169,7 +1186,7 @@ void DerivationGoal::outputsSubstituted() return; } - unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); + auto nrInvalid = checkPathValidity(false, buildMode == bmRepair).size(); if (buildMode == bmNormal && nrInvalid == 0) { done(BuildResult::Substituted); return; @@ -1389,7 +1406,7 @@ void DerivationGoal::tryToBuild() /* Don't do a remote build if the derivation has the attribute `preferLocalBuild' set. Also, check and repair modes are only supported for local builds. */ - bool buildLocally = buildMode != bmNormal || drv->willBuildLocally(); + bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(); auto started = [&]() { auto msg = fmt( @@ -1635,19 +1652,13 @@ HookReply DerivationGoal::tryBuildHook() try { - /* Tell the hook about system features (beyond the system type) - required from the build machine. (The hook could parse the - drv file itself, but this is easier.) */ - Strings features = tokenizeString(get(drv->env, "requiredSystemFeatures")); - for (auto & i : features) checkStoreName(i); /* !!! abuse */ - /* Send the request to the hook. */ worker.hook->sink << "try" << (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0) << drv->platform << drvPath - << features; + << parsedDrv->getRequiredSystemFeatures(); worker.hook->sink.flush(); /* Read the first line of input, which should be a word indicating @@ -1773,35 +1784,40 @@ static std::once_flag dns_resolve_flag; static void preloadNSS() { /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already - been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to + been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to load its lookup libraries in the parent before any child gets a chance to. */ std::call_once(dns_resolve_flag, []() { - DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid"); - request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning - try { getDownloader()->download(request); } catch (...) 
{} + struct addrinfo *res = NULL; + + if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) { + if (res) freeaddrinfo(res); + } }); } void DerivationGoal::startBuilder() { /* Right platform? */ - if (!drv->canBuildLocally()) { - throw Error( - format("a '%1%' is required to build '%3%', but I am a '%2%'") - % drv->platform % settings.thisSystem % drvPath); - } + if (!parsedDrv->canBuildLocally()) + throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", + drv->platform, + concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()), + drvPath, + settings.thisSystem, + concatStringsSep(", ", settings.systemFeatures)); if (drv->isBuiltin()) preloadNSS(); #if __APPLE__ - additionalSandboxProfile = get(drv->env, "__sandboxProfile"); + additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); #endif /* Are we doing a chroot build? */ { + auto noChroot = parsedDrv->getBoolAttr("__noChroot"); if (settings.sandboxMode == smEnabled) { - if (get(drv->env, "__noChroot") == "1") + if (noChroot) throw Error(format("derivation '%1%' has '__noChroot' set, " "but that's not allowed when 'sandbox' is 'true'") % drvPath); #if __APPLE__ @@ -1814,7 +1830,7 @@ void DerivationGoal::startBuilder() else if (settings.sandboxMode == smDisabled) useChroot = false; else if (settings.sandboxMode == smRelaxed) - useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1"; + useChroot = !fixedOutput && !noChroot; } if (worker.store.storeDir != worker.store.realStoreDir) { @@ -1865,7 +1881,7 @@ void DerivationGoal::startBuilder() writeStructuredAttrs(); /* Handle exportReferencesGraph(), if set. */ - if (!drv->env.count("__json")) { + if (!parsedDrv->getStructuredAttrs()) { /* The `exportReferencesGraph' feature allows the references graph to be passed to a builder. This attribute should be a list of pairs [name1 path1 name2 path2 ...]. The references graph of @@ -1930,7 +1946,7 @@ void DerivationGoal::startBuilder() PathSet allowedPaths = settings.allowedImpureHostPrefixes; /* This works like the above, except on a per-derivation level */ - Strings impurePaths = tokenizeString(get(drv->env, "__impureHostDeps")); + auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings()); for (auto & i : impurePaths) { bool found = false; @@ -1999,7 +2015,7 @@ void DerivationGoal::startBuilder() /* Create /etc/hosts with localhost entry. */ if (!fixedOutput) - writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n"); + writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n::1 localhost\n"); /* Make the closure of the inputs available in the chroot, rather than the whole Nix store. This prevents any access @@ -2298,7 +2314,7 @@ void DerivationGoal::initEnv() passAsFile is ignored in structure mode because it's not needed (attributes are not passed through the environment, so there is no size constraint). */ - if (!drv->env.count("__json")) { + if (!parsedDrv->getStructuredAttrs()) { StringSet passAsFile = tokenizeString(get(drv->env, "passAsFile")); int fileNr = 0; @@ -2345,8 +2361,8 @@ void DerivationGoal::initEnv() fixed-output derivations is by definition pure (since we already know the cryptographic hash of the output). 
*/ if (fixedOutput) { - Strings varNames = tokenizeString(get(drv->env, "impureEnvVars")); - for (auto & i : varNames) env[i] = getEnv(i); + for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings())) + env[i] = getEnv(i); } /* Currently structured log messages piggyback on stderr, but we @@ -2361,111 +2377,103 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); void DerivationGoal::writeStructuredAttrs() { - auto jsonAttr = drv->env.find("__json"); - if (jsonAttr == drv->env.end()) return; - - try { - - auto jsonStr = rewriteStrings(jsonAttr->second, inputRewrites); + auto & structuredAttrs = parsedDrv->getStructuredAttrs(); + if (!structuredAttrs) return; - auto json = nlohmann::json::parse(jsonStr); + auto json = *structuredAttrs; - /* Add an "outputs" object containing the output paths. */ - nlohmann::json outputs; - for (auto & i : drv->outputs) - outputs[i.first] = rewriteStrings(i.second.path, inputRewrites); - json["outputs"] = outputs; - - /* Handle exportReferencesGraph. */ - auto e = json.find("exportReferencesGraph"); - if (e != json.end() && e->is_object()) { - for (auto i = e->begin(); i != e->end(); ++i) { - std::ostringstream str; - { - JSONPlaceholder jsonRoot(str, true); - PathSet storePaths; - for (auto & p : *i) - storePaths.insert(p.get()); - worker.store.pathInfoToJSON(jsonRoot, - exportReferences(storePaths), false, true); - } - json[i.key()] = nlohmann::json::parse(str.str()); // urgh + /* Add an "outputs" object containing the output paths. */ + nlohmann::json outputs; + for (auto & i : drv->outputs) + outputs[i.first] = rewriteStrings(i.second.path, inputRewrites); + json["outputs"] = outputs; + + /* Handle exportReferencesGraph. */ + auto e = json.find("exportReferencesGraph"); + if (e != json.end() && e->is_object()) { + for (auto i = e->begin(); i != e->end(); ++i) { + std::ostringstream str; + { + JSONPlaceholder jsonRoot(str, true); + PathSet storePaths; + for (auto & p : *i) + storePaths.insert(p.get()); + worker.store.pathInfoToJSON(jsonRoot, + exportReferences(storePaths), false, true); } + json[i.key()] = nlohmann::json::parse(str.str()); // urgh } + } - writeFile(tmpDir + "/.attrs.json", json.dump()); - - /* As a convenience to bash scripts, write a shell file that - maps all attributes that are representable in bash - - namely, strings, integers, nulls, Booleans, and arrays and - objects consisting entirely of those values. (So nested - arrays or objects are not supported.) */ + writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); - auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional { - if (value.is_string()) - return shellEscape(value); + /* As a convenience to bash scripts, write a shell file that + maps all attributes that are representable in bash - + namely, strings, integers, nulls, Booleans, and arrays and + objects consisting entirely of those values. (So nested + arrays or objects are not supported.) */ - if (value.is_number()) { - auto f = value.get(); - if (std::ceil(f) == f) - return std::to_string(value.get()); - } + auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional { + if (value.is_string()) + return shellEscape(value); - if (value.is_null()) - return std::string("''"); + if (value.is_number()) { + auto f = value.get(); + if (std::ceil(f) == f) + return std::to_string(value.get()); + } - if (value.is_boolean()) - return value.get() ? 
std::string("1") : std::string(""); + if (value.is_null()) + return std::string("''"); - return {}; - }; + if (value.is_boolean()) + return value.get() ? std::string("1") : std::string(""); - std::string jsonSh; + return {}; + }; - for (auto i = json.begin(); i != json.end(); ++i) { + std::string jsonSh; - if (!std::regex_match(i.key(), shVarName)) continue; + for (auto i = json.begin(); i != json.end(); ++i) { - auto & value = i.value(); + if (!std::regex_match(i.key(), shVarName)) continue; - auto s = handleSimpleType(value); - if (s) - jsonSh += fmt("declare %s=%s\n", i.key(), *s); + auto & value = i.value(); - else if (value.is_array()) { - std::string s2; - bool good = true; + auto s = handleSimpleType(value); + if (s) + jsonSh += fmt("declare %s=%s\n", i.key(), *s); - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { good = false; break; } - s2 += *s3; s2 += ' '; - } + else if (value.is_array()) { + std::string s2; + bool good = true; - if (good) - jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2); + for (auto i = value.begin(); i != value.end(); ++i) { + auto s3 = handleSimpleType(i.value()); + if (!s3) { good = false; break; } + s2 += *s3; s2 += ' '; } - else if (value.is_object()) { - std::string s2; - bool good = true; + if (good) + jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2); + } - for (auto i = value.begin(); i != value.end(); ++i) { - auto s3 = handleSimpleType(i.value()); - if (!s3) { good = false; break; } - s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3); - } + else if (value.is_object()) { + std::string s2; + bool good = true; - if (good) - jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2); + for (auto i = value.begin(); i != value.end(); ++i) { + auto s3 = handleSimpleType(i.value()); + if (!s3) { good = false; break; } + s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3); } - } - writeFile(tmpDir + "/.attrs.sh", jsonSh); - - } catch (std::exception & e) { - throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what()); + if (good) + jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2); + } } + + writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); } @@ -2620,7 +2628,7 @@ void DerivationGoal::runChild() createDirs(chrootRootDir + "/dev/shm"); createDirs(chrootRootDir + "/dev/pts"); ss.push_back("/dev/full"); - if (pathExists("/dev/kvm")) + if (settings.systemFeatures.get().count("kvm") && pathExists("/dev/kvm")) ss.push_back("/dev/kvm"); ss.push_back("/dev/null"); ss.push_back("/dev/random"); @@ -2909,7 +2917,7 @@ void DerivationGoal::runChild() writeFile(sandboxFile, sandboxProfile); - bool allowLocalNetworking = get(drv->env, "__darwinAllowLocalNetworking") == "1"; + bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking"); /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */ @@ -2981,10 +2989,9 @@ void DerivationGoal::runChild() /* Parse a list of reference specifiers. Each element must either be a store path, or the symbolic name of the output of the derivation (such as `out'). 
*/ -PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr) +PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, const Strings & paths) { PathSet result; - Paths paths = tokenizeString(attr); for (auto & i : paths) { if (store.isStorePath(i)) result.insert(i); @@ -3009,7 +3016,7 @@ void DerivationGoal::registerOutputs() if (allValid) return; } - ValidPathInfos infos; + std::map infos; /* Set of inodes seen during calls to canonicalisePathMetaData() for this build's outputs. This needs to be shared between @@ -3113,15 +3120,15 @@ void DerivationGoal::registerOutputs() the derivation to its content-addressed location. */ Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath); - Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]); + Path dest = worker.store.makeFixedOutputPath(recursive, h2, storePathToName(path)); if (h != h2) { /* Throw an error after registering the path as valid. */ delayedException = std::make_exception_ptr( - BuildError("fixed-output derivation produced path '%s' with %s hash '%s' instead of the expected hash '%s'", - dest, printHashType(h.type), printHash16or32(h2), printHash16or32(h))); + BuildError("hash mismatch in fixed-output derivation '%s':\n wanted: %s\n got: %s", + dest, h.to_string(), h2.to_string())); Path actualDest = worker.store.toRealPath(dest); @@ -3194,48 +3201,6 @@ void DerivationGoal::registerOutputs() debug(format("referenced input: '%1%'") % i); } - /* Enforce `allowedReferences' and friends. */ - auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) { - if (drv->env.find(attrName) == drv->env.end()) return; - - PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName)); - - PathSet used; - if (recursive) { - /* Our requisites are the union of the closures of our references. */ - for (auto & i : references) - /* Don't call computeFSClosure on ourselves. */ - if (path != i) - worker.store.computeFSClosure(i, used); - } else - used = references; - - PathSet badPaths; - - for (auto & i : used) - if (allowed) { - if (spec.find(i) == spec.end()) - badPaths.insert(i); - } else { - if (spec.find(i) != spec.end()) - badPaths.insert(i); - } - - if (!badPaths.empty()) { - string badPathsStr; - for (auto & i : badPaths) { - badPathsStr += "\n\t"; - badPathsStr += i; - } - throw BuildError(format("output '%1%' is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr); - } - }; - - checkRefs("allowedReferences", true, false); - checkRefs("allowedRequisites", true, true); - checkRefs("disallowedReferences", false, false); - checkRefs("disallowedRequisites", false, true); - if (curRound == nrRounds) { worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences() worker.markContentsGood(path); @@ -3249,11 +3214,16 @@ void DerivationGoal::registerOutputs() info.ultimate = true; worker.store.signPathInfo(info); - infos.push_back(info); + if (!info.references.empty()) info.ca.clear(); + + infos[i.first] = info; } if (buildMode == bmCheck) return; + /* Apply output checks. 
*/ + checkOutputs(infos); + /* Compare the result with the previous round, and report which path is different, if any.*/ if (curRound > 1 && prevInfos != infos) { @@ -3261,16 +3231,16 @@ void DerivationGoal::registerOutputs() for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j) if (!(*i == *j)) { result.isNonDeterministic = true; - Path prev = i->path + checkSuffix; + Path prev = i->second.path + checkSuffix; bool prevExists = keepPreviousRound && pathExists(prev); auto msg = prevExists - ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->path, drvPath, prev) - : fmt("output '%1%' of '%2%' differs from previous round", i->path, drvPath); + ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->second.path, drvPath, prev) + : fmt("output '%1%' of '%2%' differs from previous round", i->second.path, drvPath); auto diffHook = settings.diffHook; if (prevExists && diffHook != "" && runDiffHook) { try { - auto diff = runProgram(diffHook, true, {prev, i->path}); + auto diff = runProgram(diffHook, true, {prev, i->second.path}); if (diff != "") printError(chomp(diff)); } catch (Error & error) { @@ -3315,7 +3285,11 @@ void DerivationGoal::registerOutputs() /* Register each output path as valid, and register the sets of paths referenced by each of them. If there are cycles in the outputs, this will fail. */ - worker.store.registerValidPaths(infos); + { + ValidPathInfos infos2; + for (auto & i : infos) infos2.push_back(i.second); + worker.store.registerValidPaths(infos2); + } /* In case of a fixed-output derivation hash mismatch, throw an exception now that we have registered the output as valid. */ @@ -3324,6 +3298,158 @@ void DerivationGoal::registerOutputs() } +void DerivationGoal::checkOutputs(const std::map & outputs) +{ + std::map outputsByPath; + for (auto & output : outputs) + outputsByPath.emplace(output.second.path, output.second); + + for (auto & output : outputs) { + auto & outputName = output.first; + auto & info = output.second; + + struct Checks + { + bool ignoreSelfRefs = false; + std::experimental::optional maxSize, maxClosureSize; + std::experimental::optional allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites; + }; + + /* Compute the closure and closure size of some output. This + is slightly tricky because some of its references (namely + other outputs) may not be valid yet. 
*/ + auto getClosure = [&](const Path & path) + { + uint64_t closureSize = 0; + PathSet pathsDone; + std::queue pathsLeft; + pathsLeft.push(path); + + while (!pathsLeft.empty()) { + auto path = pathsLeft.front(); + pathsLeft.pop(); + if (!pathsDone.insert(path).second) continue; + + auto i = outputsByPath.find(path); + if (i != outputsByPath.end()) { + closureSize += i->second.narSize; + for (auto & ref : i->second.references) + pathsLeft.push(ref); + } else { + auto info = worker.store.queryPathInfo(path); + closureSize += info->narSize; + for (auto & ref : info->references) + pathsLeft.push(ref); + } + } + + return std::make_pair(pathsDone, closureSize); + }; + + auto applyChecks = [&](const Checks & checks) + { + if (checks.maxSize && info.narSize > *checks.maxSize) + throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes", + info.path, info.narSize, *checks.maxSize); + + if (checks.maxClosureSize) { + uint64_t closureSize = getClosure(info.path).second; + if (closureSize > *checks.maxClosureSize) + throw BuildError("closure of path '%s' is too large at %d bytes; limit is %d bytes", + info.path, closureSize, *checks.maxClosureSize); + } + + auto checkRefs = [&](const std::experimental::optional & value, bool allowed, bool recursive) + { + if (!value) return; + + PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value); + + PathSet used = recursive ? getClosure(info.path).first : info.references; + + if (recursive && checks.ignoreSelfRefs) + used.erase(info.path); + + PathSet badPaths; + + for (auto & i : used) + if (allowed) { + if (!spec.count(i)) + badPaths.insert(i); + } else { + if (spec.count(i)) + badPaths.insert(i); + } + + if (!badPaths.empty()) { + string badPathsStr; + for (auto & i : badPaths) { + badPathsStr += "\n "; + badPathsStr += i; + } + throw BuildError("output '%s' is not allowed to refer to the following paths:%s", info.path, badPathsStr); + } + }; + + checkRefs(checks.allowedReferences, true, false); + checkRefs(checks.allowedRequisites, true, true); + checkRefs(checks.disallowedReferences, false, false); + checkRefs(checks.disallowedRequisites, false, true); + }; + + if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { + auto outputChecks = structuredAttrs->find("outputChecks"); + if (outputChecks != structuredAttrs->end()) { + auto output = outputChecks->find(outputName); + + if (output != outputChecks->end()) { + Checks checks; + + auto maxSize = output->find("maxSize"); + if (maxSize != output->end()) + checks.maxSize = maxSize->get(); + + auto maxClosureSize = output->find("maxClosureSize"); + if (maxClosureSize != output->end()) + checks.maxClosureSize = maxClosureSize->get(); + + auto get = [&](const std::string & name) -> std::experimental::optional { + auto i = output->find(name); + if (i != output->end()) { + Strings res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + res.push_back(j->get()); + } + checks.disallowedRequisites = res; + return res; + } + return {}; + }; + + checks.allowedReferences = get("allowedReferences"); + checks.allowedRequisites = get("allowedRequisites"); + checks.disallowedReferences = get("disallowedReferences"); + checks.disallowedRequisites = get("disallowedRequisites"); + + applyChecks(checks); + } + } + } else { + // legacy non-structured-attributes case + Checks checks; + checks.ignoreSelfRefs = true; + checks.allowedReferences = 
parsedDrv->getStringsAttr("allowedReferences"); + checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites"); + checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences"); + checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites"); + applyChecks(checks); + } + } +} + + Path DerivationGoal::openLogFile() { logSize = 0; @@ -3520,8 +3646,8 @@ class SubstitutionGoal : public Goal /* The current substituter. */ std::shared_ptr sub; - /* Whether any substituter can realise this path. */ - bool hasSubstitute; + /* Whether a substituter failed. */ + bool substituterFailed = false; /* Path info returned by the substituter's query info operation. */ std::shared_ptr info; @@ -3585,7 +3711,6 @@ class SubstitutionGoal : public Goal SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, RepairFlag repair) : Goal(worker) - , hasSubstitute(false) , repair(repair) { this->storePath = storePath; @@ -3649,9 +3774,9 @@ void SubstitutionGoal::tryNext() /* Hack: don't indicate failure if there were no substituters. In that case the calling derivation should just do a build. */ - amDone(hasSubstitute ? ecFailed : ecNoSubstituters); + amDone(substituterFailed ? ecFailed : ecNoSubstituters); - if (hasSubstitute) { + if (substituterFailed) { worker.failedSubstitutions++; worker.updateProgress(); } @@ -3673,6 +3798,19 @@ void SubstitutionGoal::tryNext() } catch (InvalidPath &) { tryNext(); return; + } catch (SubstituterDisabled &) { + if (settings.tryFallback) { + tryNext(); + return; + } + throw; + } catch (Error & e) { + if (settings.tryFallback) { + printError(e.what()); + tryNext(); + return; + } + throw; } /* Update the total expected download size. */ @@ -3687,8 +3825,6 @@ void SubstitutionGoal::tryNext() worker.updateProgress(); - hasSubstitute = true; - /* Bail out early if this substituter lacks a valid signature. LocalStore::addToStore() also checks for this, but only after we've downloaded the path. */ @@ -3803,8 +3939,19 @@ void SubstitutionGoal::finished() state = &SubstitutionGoal::init; worker.waitForAWhile(shared_from_this()); return; - } catch (Error & e) { - printError(e.msg()); + } catch (std::exception & e) { + printError(e.what()); + + /* Cause the parent build to fail unless --fallback is given, + or the substitute has disappeared. The latter case behaves + the same as the substitute never having existed in the + first place. */ + try { + throw; + } catch (SubstituteGone &) { + } catch (...) { + substituterFailed = true; + } /* Try the next substitute. */ state = &SubstitutionGoal::tryNext; diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 4ca4a838e3c..92aec63a037 100644 --- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -22,52 +22,56 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) return i->second; }; - auto fetch = [&](const string & url) { - /* No need to do TLS verification, because we check the hash of - the result anyway. */ - DownloadRequest request(url); - request.verifyTLS = false; - request.decompress = false; + Path storePath = getAttr("out"); + auto mainUrl = getAttr("url"); + bool unpack = get(drv.env, "unpack", "") == "1"; - /* Note: have to use a fresh downloader here because we're in - a forked process. */ - auto data = makeDownloader()->download(request); - assert(data.data); + /* Note: have to use a fresh downloader here because we're in + a forked process. 
*/ + auto downloader = makeDownloader(); - return data.data; - }; + auto fetch = [&](const std::string & url) { + + auto source = sinkToSource([&](Sink & sink) { + + /* No need to do TLS verification, because we check the hash of + the result anyway. */ + DownloadRequest request(url); + request.verifyTLS = false; + request.decompress = false; + + auto decompressor = makeDecompressionSink( + unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink); + downloader->download(std::move(request), *decompressor); + decompressor->finish(); + }); + + if (unpack) + restorePath(storePath, *source); + else + writeFile(storePath, *source); - std::shared_ptr data; + auto executable = drv.env.find("executable"); + if (executable != drv.env.end() && executable->second == "1") { + if (chmod(storePath.c_str(), 0755) == -1) + throw SysError(format("making '%1%' executable") % storePath); + } + }; + /* Try the hashed mirrors first. */ if (getAttr("outputHashMode") == "flat") for (auto hashedMirror : settings.hashedMirrors.get()) try { if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/'; auto ht = parseHashType(getAttr("outputHashAlgo")); - data = fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false)); - break; + fetch(hashedMirror + printHashType(ht) + "/" + Hash(getAttr("outputHash"), ht).to_string(Base16, false)); + return; } catch (Error & e) { debug(e.what()); } - if (!data) data = fetch(getAttr("url")); - - Path storePath = getAttr("out"); - - auto unpack = drv.env.find("unpack"); - if (unpack != drv.env.end() && unpack->second == "1") { - if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6)) - data = decompress("xz", *data); - StringSource source(*data); - restorePath(storePath, source); - } else - writeFile(storePath, *data); - - auto executable = drv.env.find("executable"); - if (executable != drv.env.end() && executable->second == "1") { - if (chmod(storePath.c_str(), 0755) == -1) - throw SysError(format("making '%1%' executable") % storePath); - } + /* Otherwise try the specified URL. */ + fetch(mainUrl); } } diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 74b861281ee..3961126fff9 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -36,12 +36,6 @@ Path BasicDerivation::findOutput(const string & id) const } -bool BasicDerivation::willBuildLocally() const -{ - return get(env, "preferLocalBuild") == "1" && canBuildLocally(); -} - - bool BasicDerivation::substitutesAllowed() const { return get(env, "allowSubstitutes", "1") == "1"; @@ -54,14 +48,6 @@ bool BasicDerivation::isBuiltin() const } -bool BasicDerivation::canBuildLocally() const -{ - return platform == settings.thisSystem - || settings.extraPlatforms.get().count(platform) > 0 - || isBuiltin(); -} - - Path writeDerivation(ref store, const Derivation & drv, const string & name, RepairFlag repair) { @@ -342,7 +328,7 @@ Hash hashDerivationModulo(Store & store, Derivation drv) Hash h = drvHashes[i.first]; if (!h) { assert(store.isValidPath(i.first)); - Derivation drv2 = readDerivation(i.first); + Derivation drv2 = readDerivation(store.toRealPath(i.first)); h = hashDerivationModulo(store, drv2); drvHashes[i.first] = h; } diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 7b97730d3bf..9753e796db5 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -56,14 +56,10 @@ struct BasicDerivation the given derivation. 
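
A side note on the rewritten builtins/fetchurl.cc above: the body is now streamed through a decompression sink into the store path instead of being buffered in a string, and hashed mirrors are tried before the main URL for fixed-output ('flat') downloads. The mirror URL it constructs has the following shape; the mirror host and digest below are placeholders, not values from this patch.

    // Shape of the hashed-mirror URL tried first by builtinFetchurl above:
    // "<mirror>/<hash type>/<base-16 hash>".
    #include <iostream>
    #include <string>

    int main()
    {
        std::string hashedMirror = "https://tarballs.example.org/"; // hypothetical mirror
        std::string hashType = "sha256";
        std::string outputHash(64, '0');  // stand-in for the base-16 sha256 of the file
        std::cout << hashedMirror + hashType + "/" + outputHash << "\n";
    }
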
*/ Path findOutput(const string & id) const; - bool willBuildLocally() const; - bool substitutesAllowed() const; bool isBuiltin() const; - bool canBuildLocally() const; - /* Return true iff this is a fixed-output derivation. */ bool isFixedOutput() const; diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 18f9094f82e..22382ab1d6e 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -7,6 +7,7 @@ #include "s3.hh" #include "compression.hh" #include "pathlocks.hh" +#include "finally.hh" #ifdef ENABLE_S3 #include @@ -29,12 +30,25 @@ using namespace std::string_literals; namespace nix { -double getTime() +struct DownloadSettings : Config { - struct timeval tv; - gettimeofday(&tv, 0); - return tv.tv_sec + (tv.tv_usec / 1000000.0); -} + Setting enableHttp2{this, true, "http2", + "Whether to enable HTTP/2 support."}; + + Setting userAgentSuffix{this, "", "user-agent-suffix", + "String appended to the user agent in HTTP requests."}; + + Setting httpConnections{this, 25, "http-connections", + "Number of parallel HTTP connections.", + {"binary-caches-parallel-connections"}}; + + Setting connectTimeout{this, 0, "connect-timeout", + "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."}; +}; + +static DownloadSettings downloadSettings; + +static GlobalConfig::Register r1(&downloadSettings); std::string resolveUri(const std::string & uri) { @@ -44,16 +58,6 @@ std::string resolveUri(const std::string & uri) return uri; } -ref decodeContent(const std::string & encoding, ref data) -{ - if (encoding == "") - return data; - else if (encoding == "br") - return decompress(encoding, *data); - else - throw Error("unsupported Content-Encoding '%s'", encoding); -} - struct CurlDownloader : public Downloader { CURLM * curlm = 0; @@ -61,8 +65,6 @@ struct CurlDownloader : public Downloader std::random_device rd; std::mt19937 mt19937; - bool enableHttp2; - struct DownloadItem : public std::enable_shared_from_this { CurlDownloader & downloader; @@ -70,8 +72,7 @@ struct CurlDownloader : public Downloader DownloadResult result; Activity act; bool done = false; // whether either the success or failure function has been called - std::function success; - std::function failure; + Callback callback; CURL * req = 0; bool active = false; // whether the handle has been added to the multi object std::string status; @@ -86,10 +87,21 @@ struct CurlDownloader : public Downloader std::string encoding; - DownloadItem(CurlDownloader & downloader, const DownloadRequest & request) + DownloadItem(CurlDownloader & downloader, + const DownloadRequest & request, + Callback callback) : downloader(downloader) , request(request) - , act(*logger, lvlTalkative, actDownload, fmt("downloading '%s'", request.uri), {request.uri}, request.parentAct) + , act(*logger, lvlTalkative, actDownload, + fmt(request.data ? 
"uploading '%s'" : "downloading '%s'", request.uri), + {request.uri}, request.parentAct) + , callback(callback) + , finalSink([this](const unsigned char * data, size_t len) { + if (this->request.dataCallback) + this->request.dataCallback((char *) data, len); + else + this->result.data->append((char *) data, len); + }) { if (!request.expectedETag.empty()) requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); @@ -113,19 +125,40 @@ struct CurlDownloader : public Downloader } } - template - void fail(const T & e) + void failEx(std::exception_ptr ex) { assert(!done); done = true; - callFailure(failure, std::make_exception_ptr(e)); + callback.rethrow(ex); } + template + void fail(const T & e) + { + failEx(std::make_exception_ptr(e)); + } + + LambdaSink finalSink; + std::shared_ptr decompressionSink; + + std::exception_ptr writeException; + size_t writeCallback(void * contents, size_t size, size_t nmemb) { - size_t realSize = size * nmemb; - result.data->append((char *) contents, realSize); - return realSize; + try { + size_t realSize = size * nmemb; + result.bodySize += realSize; + + if (!decompressionSink) + decompressionSink = makeDecompressionSink(encoding, finalSink); + + (*decompressionSink)((unsigned char *) contents, realSize); + + return realSize; + } catch (...) { + writeException = std::current_exception(); + return 0; + } } static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp) @@ -143,6 +176,7 @@ struct CurlDownloader : public Downloader auto ss = tokenizeString>(line, " "); status = ss.size() >= 2 ? ss[1] : ""; result.data = std::make_shared(); + result.bodySize = 0; encoding = ""; } else { auto i = line.find(':'); @@ -194,7 +228,7 @@ struct CurlDownloader : public Downloader } size_t readOffset = 0; - int readCallback(char *buffer, size_t size, size_t nitems) + size_t readCallback(char *buffer, size_t size, size_t nitems) { if (readOffset == request.data->length()) return 0; @@ -205,7 +239,7 @@ struct CurlDownloader : public Downloader return count; } - static int readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp) + static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp) { return ((DownloadItem *) userp)->readCallback(buffer, size, nitems); } @@ -225,15 +259,16 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str()); curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10); curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + - (settings.userAgentSuffix != "" ? " " + settings.userAgentSuffix.get() : "")).c_str()); + (downloadSettings.userAgentSuffix != "" ? 
" " + downloadSettings.userAgentSuffix.get() : "")).c_str()); #if LIBCURL_VERSION_NUM >= 0x072b00 curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1); #endif #if LIBCURL_VERSION_NUM >= 0x072f00 - if (downloader.enableHttp2) + if (downloadSettings.enableHttp2) curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS); #endif curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper); @@ -265,7 +300,7 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); } - curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get()); + curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get()); curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, lowSpeedTimeout); @@ -276,6 +311,7 @@ struct CurlDownloader : public Downloader curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL); result.data = std::make_shared(); + result.bodySize = 0; } void finish(CURLcode code) @@ -288,34 +324,40 @@ struct CurlDownloader : public Downloader if (effectiveUrlCStr) result.effectiveUrl = effectiveUrlCStr; - debug(format("finished download of '%s'; curl status = %d, HTTP status = %d, body = %d bytes") - % request.uri % code % httpStatus % (result.data ? result.data->size() : 0)); + debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes", + request.verb(), request.uri, code, httpStatus, result.bodySize); + + if (decompressionSink) + decompressionSink->finish(); if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) { code = CURLE_OK; httpStatus = 304; } - if (code == CURLE_OK && + if (writeException) + failEx(writeException); + + else if (code == CURLE_OK && (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */)) { result.cached = httpStatus == 304; done = true; try { - if (request.decompress) - result.data = decodeContent(encoding, ref(result.data)); - callSuccess(success, failure, const_cast(result)); - act.progress(result.data->size(), result.data->size()); + act.progress(result.bodySize, result.bodySize); + callback(std::move(result)); } catch (...) { done = true; - callFailure(failure, std::current_exception()); + callback.rethrow(); } - } else { + } + + else { // We treat most errors as transient, but won't retry when hopeless Error err = Transient; - if (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) { + if (httpStatus == 404 || httpStatus == 410 || code == CURLE_FILE_COULDNT_READ_FILE) { // The file is definitely not there err = NotFound; } else if (httpStatus == 401 || httpStatus == 403 || httpStatus == 407) { @@ -345,6 +387,8 @@ struct CurlDownloader : public Downloader case CURLE_INTERFACE_FAILED: case CURLE_UNKNOWN_OPTION: case CURLE_SSL_CACERT_BADFILE: + case CURLE_TOO_MANY_REDIRECTS: + case CURLE_WRITE_ERROR: err = Misc; break; default: // Shut up warnings @@ -356,10 +400,16 @@ struct CurlDownloader : public Downloader auto exc = code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted - ? DownloadError(Interrupted, format("download of '%s' was interrupted") % request.uri) + ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri)) : httpStatus != 0 - ? 
DownloadError(err, format("unable to download '%s': HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code)) - : DownloadError(err, format("unable to download '%s': %s (%d)") % request.uri % curl_easy_strerror(code) % code); + ? DownloadError(err, + fmt("unable to %s '%s': HTTP error %d", + request.verb(), request.uri, httpStatus) + + (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code))) + ) + : DownloadError(err, + fmt("unable to %s '%s': %s (%d)", + request.verb(), request.uri, curl_easy_strerror(code), code)); /* If this is a transient error, then maybe retry the download after a while. */ @@ -408,11 +458,9 @@ struct CurlDownloader : public Downloader #endif #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0 curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, - settings.binaryCachesParallelConnections.get()); + downloadSettings.httpConnections.get()); #endif - enableHttp2 = settings.enableHttp2; - wakeupPipe.create(); fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK); @@ -480,10 +528,11 @@ struct CurlDownloader : public Downloader extraFDs[0].fd = wakeupPipe.readSide.get(); extraFDs[0].events = CURL_WAIT_POLLIN; extraFDs[0].revents = 0; + long maxSleepTimeMs = items.empty() ? 10000 : 100; auto sleepTimeMs = nextWakeup != std::chrono::steady_clock::time_point() ? std::max(0, (int) std::chrono::duration_cast(nextWakeup - std::chrono::steady_clock::now()).count()) - : 10000; + : maxSleepTimeMs; vomit("download thread waiting for %d ms", sleepTimeMs); mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds); if (mc != CURLM_OK) @@ -522,7 +571,7 @@ struct CurlDownloader : public Downloader } for (auto & item : incoming) { - debug(format("starting download of %s") % item->request.uri); + debug("starting %s of %s", item->request.verb(), item->request.uri); item->init(); curl_multi_add_handle(curlm, item->req); item->active = true; @@ -551,6 +600,11 @@ struct CurlDownloader : public Downloader void enqueueItem(std::shared_ptr item) { + if (item->request.data + && !hasPrefix(item->request.uri, "http://") + && !hasPrefix(item->request.uri, "https://")) + throw nix::Error("uploading to '%s' is not supported", item->request.uri); + { auto state(state_.lock()); if (state->quit) @@ -560,48 +614,61 @@ struct CurlDownloader : public Downloader writeFull(wakeupPipe.writeSide.get(), " "); } +#ifdef ENABLE_S3 + std::tuple parseS3Uri(std::string uri) + { + auto [path, params] = splitUriAndParams(uri); + + auto slash = path.find('/', 5); // 5 is the length of "s3://" prefix + if (slash == std::string::npos) + throw nix::Error("bad S3 URI '%s'", path); + + std::string bucketName(path, 5, slash - 5); + std::string key(path, slash + 1); + + return {bucketName, key, params}; + } +#endif + void enqueueDownload(const DownloadRequest & request, - std::function success, - std::function failure) override + Callback callback) override { /* Ugly hack to support s3:// URIs. 
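
The new parseS3Uri helper above splits an S3 URI after stripping any query parameters (via splitUriAndParams); those parameters — profile, region, scheme, endpoint — are read a few lines below and passed to S3Helper. A standalone sketch of the bucket/key split, using a made-up URI:

    // What parseS3Uri extracts from "s3://my-bucket/nix/example.narinfo"
    // (bucket and key names are illustrative).
    #include <iostream>
    #include <string>

    int main()
    {
        std::string path = "s3://my-bucket/nix/example.narinfo";
        auto slash = path.find('/', 5);               // 5 is the length of "s3://"
        std::string bucketName(path, 5, slash - 5);   // "my-bucket"
        std::string key(path, slash + 1);             // "nix/example.narinfo"
        std::cout << bucketName << " " << key << "\n";
    }
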
*/ if (hasPrefix(request.uri, "s3://")) { // FIXME: do this on a worker thread - sync2async(success, failure, [&]() -> DownloadResult { + try { #ifdef ENABLE_S3 - S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable - auto slash = request.uri.find('/', 5); - if (slash == std::string::npos) - throw nix::Error("bad S3 URI '%s'", request.uri); - std::string bucketName(request.uri, 5, slash - 5); - std::string key(request.uri, slash + 1); + auto [bucketName, key, params] = parseS3Uri(request.uri); + + std::string profile = get(params, "profile", ""); + std::string region = get(params, "region", Aws::Region::US_EAST_1); + std::string scheme = get(params, "scheme", ""); + std::string endpoint = get(params, "endpoint", ""); + + S3Helper s3Helper(profile, region, scheme, endpoint); + // FIXME: implement ETag auto s3Res = s3Helper.getObject(bucketName, key); DownloadResult res; if (!s3Res.data) throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri)); res.data = s3Res.data; - return res; + callback(std::move(res)); #else throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri); #endif - }); + } catch (...) { callback.rethrow(); } return; } - auto item = std::make_shared(*this, request); - item->success = success; - item->failure = failure; - enqueueItem(item); + enqueueItem(std::make_shared(*this, request, callback)); } }; ref getDownloader() { - static std::shared_ptr downloader; - static std::once_flag downloaderCreated; - std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); }); - return ref(downloader); + static ref downloader = makeDownloader(); + return downloader; } ref makeDownloader() @@ -613,8 +680,13 @@ std::future Downloader::enqueueDownload(const DownloadRequest & { auto promise = std::make_shared>(); enqueueDownload(request, - [promise](const DownloadResult & result) { promise->set_value(result); }, - [promise](std::exception_ptr exc) { promise->set_exception(exc); }); + {[promise](std::future fut) { + try { + promise->set_value(fut.get()); + } catch (...) { + promise->set_exception(std::current_exception()); + } + }}); return promise->get_future(); } @@ -623,7 +695,102 @@ DownloadResult Downloader::download(const DownloadRequest & request) return enqueueDownload(request).get(); } -Path Downloader::downloadCached(ref store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl) +void Downloader::download(DownloadRequest && request, Sink & sink) +{ + /* Note: we can't call 'sink' via request.dataCallback, because + that would cause the sink to execute on the downloader + thread. If 'sink' is a coroutine, this will fail. Also, if the + sink is expensive (e.g. one that does decompression and writing + to the Nix store), it would stall the download thread too much. + Therefore we use a buffer to communicate data between the + download thread and the calling thread. */ + + struct State { + bool quit = false; + std::exception_ptr exc; + std::string data; + std::condition_variable avail, request; + }; + + auto _state = std::make_shared>(); + + /* In case of an exception, wake up the download thread. FIXME: + abort the download request. 
*/ + Finally finally([&]() { + auto state(_state->lock()); + state->quit = true; + state->request.notify_one(); + }); + + request.dataCallback = [_state](char * buf, size_t len) { + + auto state(_state->lock()); + + if (state->quit) return; + + /* If the buffer is full, then go to sleep until the calling + thread wakes us up (i.e. when it has removed data from the + buffer). We don't wait forever to prevent stalling the + download thread. (Hopefully sleeping will throttle the + sender.) */ + if (state->data.size() > 1024 * 1024) { + debug("download buffer is full; going to sleep"); + state.wait_for(state->request, std::chrono::seconds(10)); + } + + /* Append data to the buffer and wake up the calling + thread. */ + state->data.append(buf, len); + state->avail.notify_one(); + }; + + enqueueDownload(request, + {[_state](std::future fut) { + auto state(_state->lock()); + state->quit = true; + try { + fut.get(); + } catch (...) { + state->exc = std::current_exception(); + } + state->avail.notify_one(); + state->request.notify_one(); + }}); + + while (true) { + checkInterrupt(); + + std::string chunk; + + /* Grab data if available, otherwise wait for the download + thread to wake us up. */ + { + auto state(_state->lock()); + + while (state->data.empty()) { + + if (state->quit) { + if (state->exc) std::rethrow_exception(state->exc); + return; + } + + state.wait(state->avail); + } + + chunk = std::move(state->data); + + state->request.notify_one(); + } + + /* Flush the data to the sink and wake up the download thread + if it's blocked on a full buffer. We don't hold the state + lock while doing this to prevent blocking the download + thread if sink() takes a long time. */ + sink((unsigned char *) chunk.data(), chunk.size()); + } +} + +Path Downloader::downloadCached(ref store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl, int ttl) { auto url = resolveUri(url_); @@ -653,7 +820,6 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa string expectedETag; - int ttl = settings.tarballTtl; bool skip = false; if (pathExists(fileLink) && pathExists(dataFile)) { @@ -734,8 +900,8 @@ Path Downloader::downloadCached(ref store, const string & url_, bool unpa Hash gotHash = unpack ? hashPath(expectedHash.type, store->toRealPath(storePath)).first : hashFile(expectedHash.type, store->toRealPath(storePath)); - throw nix::Error("hash mismatch in file downloaded from '%s': got hash '%s' instead of the expected hash '%s'", - url, gotHash.to_string(), expectedHash.to_string()); + throw nix::Error("hash mismatch in file downloaded from '%s':\n wanted: %s\n got: %s", + url, expectedHash.to_string(), gotHash.to_string()); } return store->toRealPath(storePath); diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 0b8d29b21df..f0228f7d053 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -2,6 +2,7 @@ #include "types.hh" #include "hash.hh" +#include "globals.hh" #include #include @@ -20,9 +21,15 @@ struct DownloadRequest bool decompress = true; std::shared_ptr data; std::string mimeType; + std::function dataCallback; DownloadRequest(const std::string & uri) : uri(uri), parentAct(getCurActivity()) { } + + std::string verb() + { + return data ? "upload" : "download"; + } }; struct DownloadResult @@ -31,6 +38,7 @@ struct DownloadResult std::string etag; std::string effectiveUrl; std::shared_ptr data; + uint64_t bodySize = 0; }; class Store; @@ -41,20 +49,23 @@ struct Downloader the download. 
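
A sketch of how a caller might use the streaming overload Downloader::download(DownloadRequest &&, Sink &) defined above; it would build against this tree's download.hh and serialise.hh, and the function name is illustrative. The point of the overload is that the sink runs on the calling thread and sees the body incrementally through the bounded buffer shown above; a StringSink is used here only for brevity, whereas a decompression or store-writing sink (as in http-binary-cache-store.cc below) avoids buffering the whole body.

    // Hypothetical caller of the new sink-based download API.
    #include "download.hh"
    #include "serialise.hh"

    std::string fetchToString(const std::string & uri)
    {
        nix::DownloadRequest request(uri);
        nix::StringSink sink;                              // accumulates the body
        nix::getDownloader()->download(std::move(request), sink);
        return *sink.s;
    }
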
The future may throw a DownloadError exception. */ virtual void enqueueDownload(const DownloadRequest & request, - std::function success, - std::function failure) = 0; + Callback callback) = 0; std::future enqueueDownload(const DownloadRequest & request); /* Synchronously download a file. */ DownloadResult download(const DownloadRequest & request); + /* Download a file, writing its data to a sink. The sink will be + invoked on the thread of the caller. */ + void download(DownloadRequest && request, Sink & sink); + /* Check if the specified file is already in ~/.cache/nix/tarballs and is more recent than ‘tarball-ttl’ seconds. Otherwise, use the recorded ETag to verify if the server has a more recent version, and if so, download it to the Nix store. */ Path downloadCached(ref store, const string & uri, bool unpack, string name = "", - const Hash & expectedHash = Hash(), string * effectiveUri = nullptr); + const Hash & expectedHash = Hash(), string * effectiveUri = nullptr, int ttl = settings.tarballTtl); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; @@ -77,7 +88,4 @@ public: bool isUri(const string & s); -/* Decode data according to the Content-Encoding header. */ -ref decodeContent(const std::string & encoding, ref data); - } diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index ba49749d830..b415d542147 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -365,7 +366,7 @@ static void readProcLink(const string & file, StringSet & paths) char buf[bufsiz]; auto res = readlink(file.c_str(), buf, bufsiz); if (res == -1) { - if (errno == ENOENT || errno == EACCES) + if (errno == ENOENT || errno == EACCES || errno == ESRCH) return; throw SysError("reading symlink"); } @@ -425,25 +426,28 @@ PathSet LocalStore::findRuntimeRoots() readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths); } } - if (errno) + if (errno) { + if (errno == ESRCH) + continue; throw SysError(format("iterating /proc/%1%/fd") % ent->d_name); - fdDir.reset(); - - auto mapLines = - tokenizeString>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n"); - for (const auto& line : mapLines) { - auto match = std::smatch{}; - if (std::regex_match(line, match, mapRegex)) - paths.emplace(match[1]); } + fdDir.reset(); try { + auto mapLines = + tokenizeString>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n"); + for (const auto& line : mapLines) { + auto match = std::smatch{}; + if (std::regex_match(line, match, mapRegex)) + paths.emplace(match[1]); + } + auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true); auto env_end = std::sregex_iterator{}; for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i) paths.emplace(i->str()); } catch (SysError & e) { - if (errno == ENOENT || errno == EACCES) + if (errno == ENOENT || errno == EACCES || errno == ESRCH) continue; throw; } @@ -829,7 +833,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) alphabetically first (e.g. /nix/store/000...). This matters when using --max-freed etc. 
*/ vector entries_(entries.begin(), entries.end()); - random_shuffle(entries_.begin(), entries_.end()); + std::mt19937 gen(1); + std::shuffle(entries_.begin(), entries_.end(), gen); for (auto & i : entries_) tryToDelete(state, i); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 544566e0b57..1c2c08715a1 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -28,9 +28,10 @@ namespace nix { Settings settings; +static GlobalConfig::Register r1(&settings); + Settings::Settings() - : Config({}) - , nixPrefix(NIX_PREFIX) + : nixPrefix(NIX_PREFIX) , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)))) , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR))) , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR))) @@ -69,25 +70,39 @@ Settings::Settings() allowedImpureHostPrefixes = tokenizeString(DEFAULT_ALLOWED_IMPURE_PREFIXES); } -void Settings::loadConfFile() +void loadConfFile() { - applyConfigFile(nixConfDir + "/nix.conf"); + globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf"); /* We only want to send overrides to the daemon, i.e. stuff from ~/.nix/nix.conf or the command line. */ - resetOverriden(); + globalConfig.resetOverriden(); - applyConfigFile(getConfigDir() + "/nix/nix.conf"); + auto dirs = getConfigDirs(); + // Iterate over them in reverse so that the ones appearing first in the path take priority + for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) { + globalConfig.applyConfigFile(*dir + "/nix/nix.conf"); + } } -void Settings::set(const string & name, const string & value) +unsigned int Settings::getDefaultCores() { - Config::set(name, value); + return std::max(1U, std::thread::hardware_concurrency()); } -unsigned int Settings::getDefaultCores() +StringSet Settings::getDefaultSystemFeatures() { - return std::max(1U, std::thread::hardware_concurrency()); + /* For backwards compatibility, accept some "features" that are + used in Nixpkgs to route builds to certain machines but don't + actually require anything special on the machines. */ + StringSet features{"nixos-test", "benchmark", "big-parallel"}; + + #if __linux__ + if (access("/dev/kvm", R_OK | W_OK) == 0) + features.insert("kvm"); + #endif + + return features; } const string nixVersion = PACKAGE_VERSION; @@ -162,23 +177,11 @@ void initPlugins() throw Error("could not dynamically open plugin file '%s': %s", file, dlerror()); } } - /* We handle settings registrations here, since plugins can add settings */ - if (RegisterSetting::settingRegistrations) { - for (auto & registration : *RegisterSetting::settingRegistrations) - settings.addSetting(registration); - delete RegisterSetting::settingRegistrations; - } - settings.handleUnknownSettings(); -} -RegisterSetting::SettingRegistrations * RegisterSetting::settingRegistrations; - -RegisterSetting::RegisterSetting(AbstractSetting * s) -{ - if (!settingRegistrations) - settingRegistrations = new SettingRegistrations; - settingRegistrations->emplace_back(s); + /* Since plugins can add settings, try to re-apply previously + unknown settings. 
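
The registration pattern introduced above for DownloadSettings (a file-local Config plus a GlobalConfig::Register) is also what lets plugins loaded here contribute settings, which is why previously unknown settings are re-applied after dlopen below. A minimal sketch with invented names, assuming this tree's config.hh:

    // Sketch of a component- or plugin-local settings group, in the style of
    // DownloadSettings in download.cc above. Names are hypothetical.
    #include "config.hh"

    using namespace nix;

    struct FrobSettings : Config
    {
        Setting<bool> enableFrob{this, false, "enable-frob",
            "Whether to enable frobnication."};
    };

    static FrobSettings frobSettings;

    static GlobalConfig::Register rFrobSettings(&frobSettings);

Once registered, 'enable-frob' would be recognised in nix.conf and via --option like any built-in setting.
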
*/ + globalConfig.reapplyUnknownSettings(); + globalConfig.warnUnknownSettings(); } - } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 9360096aae8..53efc6a90fb 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -13,26 +13,6 @@ namespace nix { typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode; -extern bool useCaseHack; // FIXME - -struct CaseHackSetting : public BaseSetting -{ - CaseHackSetting(Config * options, - const std::string & name, - const std::string & description, - const std::set & aliases = {}) - : BaseSetting(useCaseHack, name, description, aliases) - { - options->addSetting(this); - } - - void set(const std::string & str) override - { - BaseSetting::set(str); - nix::useCaseHack = value; - } -}; - struct MaxBuildJobsSetting : public BaseSetting { MaxBuildJobsSetting(Config * options, @@ -52,14 +32,12 @@ class Settings : public Config { unsigned int getDefaultCores(); + StringSet getDefaultSystemFeatures(); + public: Settings(); - void loadConfFile(); - - void set(const string & name, const string & value); - Path nixPrefix; /* The directory where we store sources and derived files. */ @@ -104,9 +82,9 @@ public: /* Whether to show build log output in real time. */ bool verboseBuild = true; - /* If verboseBuild is false, the number of lines of the tail of - the log to show if a build fails. */ - size_t logLines = 10; + Setting logLines{this, 10, "log-lines", + "If verbose-build is false, the number of lines of the tail of " + "the log to show if a build fails."}; MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs", "Maximum number of parallel build jobs. \"auto\" means use number of cores.", @@ -217,10 +195,13 @@ public: Setting showTrace{this, false, "show-trace", "Whether to show a stack trace on evaluation errors."}; - Setting enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", - "Whether builtin functions that allow executing native code should be enabled."}; - - Setting sandboxMode{this, smDisabled, "sandbox", + Setting sandboxMode{this, + #if __linux__ + smEnabled + #else + smDisabled + #endif + , "sandbox", "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".", {"build-use-chroot", "build-use-sandbox"}}; @@ -232,13 +213,6 @@ public: "Additional paths to make available inside the build sandbox.", {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}}; - Setting restrictEval{this, false, "restrict-eval", - "Whether to restrict file system access to paths in $NIX_PATH, " - "and network access to the URI prefixes listed in 'allowed-uris'."}; - - Setting pureEval{this, false, "pure-eval", - "Whether to restrict file system and network access to files specified by cryptographic hash."}; - Setting buildRepeat{this, 0, "repeat", "The number of times to repeat a build in order to verify determinism.", {"build-repeat"}}; @@ -280,13 +254,6 @@ public: Setting secretKeyFiles{this, {}, "secret-key-files", "Secret keys with which to sign local builds."}; - Setting binaryCachesParallelConnections{this, 25, "http-connections", - "Number of parallel HTTP connections.", - {"binary-caches-parallel-connections"}}; - - Setting enableHttp2{this, true, "http2", - "Whether to enable HTTP/2 support."}; - Setting tarballTtl{this, 60 * 60, "tarball-ttl", "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."}; @@ -302,6 +269,10 @@ public: "These may be supported natively (e.g. 
armv7 on some aarch64 CPUs " "or using hacks like qemu-user."}; + Setting systemFeatures{this, getDefaultSystemFeatures(), + "system-features", + "Optional features that this system implements (like \"kvm\")."}; + Setting substituters{this, nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(), "substituters", @@ -350,18 +321,6 @@ public: /* Path to the SSL CA file used */ Path caFile; - Setting enableImportFromDerivation{this, true, "allow-import-from-derivation", - "Whether the evaluator allows importing the result of a derivation."}; - - CaseHackSetting useCaseHack{this, "use-case-hack", - "Whether to enable a Darwin-specific hack for dealing with file name collisions."}; - - Setting connectTimeout{this, 0, "connect-timeout", - "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."}; - - Setting userAgentSuffix{this, "", "user-agent-suffix", - "String appended to the user agent in HTTP requests."}; - #if __linux__ Setting filterSyscalls{this, true, "filter-syscalls", "Whether to prevent certain dangerous system calls, such as " @@ -383,9 +342,6 @@ public: Setting maxFree{this, std::numeric_limits::max(), "max-free", "Stop deleting garbage when free disk space is above the specified amount."}; - Setting allowedUris{this, {}, "allowed-uris", - "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."}; - Setting pluginFiles{this, {}, "plugin-files", "Plugins to dynamically load at nix initialization time."}; }; @@ -398,15 +354,8 @@ extern Settings settings; anything else */ void initPlugins(); +void loadConfFile(); extern const string nixVersion; -struct RegisterSetting -{ - typedef std::vector SettingRegistrations; - static SettingRegistrations * settingRegistrations; - RegisterSetting(AbstractSetting * s); -}; - - } diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index b9e9cd5daba..8da0e2f9d82 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -13,6 +13,14 @@ class HttpBinaryCacheStore : public BinaryCacheStore Path cacheUri; + struct State + { + bool enabled = true; + std::chrono::steady_clock::time_point disabledUntil; + }; + + Sync _state; + public: HttpBinaryCacheStore( @@ -46,8 +54,33 @@ class HttpBinaryCacheStore : public BinaryCacheStore protected: + void maybeDisable() + { + auto state(_state.lock()); + if (state->enabled && settings.tryFallback) { + int t = 60; + printError("disabling binary cache '%s' for %s seconds", getUri(), t); + state->enabled = false; + state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t); + } + } + + void checkEnabled() + { + auto state(_state.lock()); + if (state->enabled) return; + if (std::chrono::steady_clock::now() > state->disabledUntil) { + state->enabled = true; + debug("re-enabling binary cache '%s'", getUri()); + return; + } + throw SubstituterDisabled("substituter '%s' is disabled", getUri()); + } + bool fileExists(const std::string & path) override { + checkEnabled(); + try { DownloadRequest request(cacheUri + "/" + path); request.head = true; @@ -59,6 +92,7 @@ class HttpBinaryCacheStore : public BinaryCacheStore bucket is unlistable, so treat 403 as 404. 
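
maybeDisable() and checkEnabled() above rely on the Sync<T> wrapper from this tree's sync.hh, a pattern that recurs throughout this patch: the protected data is only reachable through a RAII lock handle. A small sketch of the idiom; the non-throwing variant below is illustrative, not part of the patch.

    // The Sync<T> idiom used by maybeDisable()/checkEnabled() above.
    #include "sync.hh"
    #include <chrono>

    struct State
    {
        bool enabled = true;
        std::chrono::steady_clock::time_point disabledUntil;
    };

    static nix::Sync<State> _state;

    bool substituterUsable()         // hypothetical non-throwing variant
    {
        auto state(_state.lock());   // mutex held until 'state' goes out of scope
        if (state->enabled) return true;
        if (std::chrono::steady_clock::now() > state->disabledUntil) {
            state->enabled = true;   // cool-down elapsed; re-enable
            return true;
        }
        return false;
    }
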
*/ if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) return false; + maybeDisable(); throw; } } @@ -73,32 +107,51 @@ class HttpBinaryCacheStore : public BinaryCacheStore try { getDownloader()->download(req); } catch (DownloadError & e) { - throw UploadToHTTP(format("uploading to HTTP binary cache at %1% not supported: %2%") % cacheUri % e.msg()); + throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg()); } } - void getFile(const std::string & path, - std::function)> success, - std::function failure) override + DownloadRequest makeRequest(const std::string & path) { DownloadRequest request(cacheUri + "/" + path); request.tries = 8; + return request; + } + + void getFile(const std::string & path, Sink & sink) override + { + checkEnabled(); + auto request(makeRequest(path)); + try { + getDownloader()->download(std::move(request), sink); + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri()); + maybeDisable(); + throw; + } + } + + void getFile(const std::string & path, + Callback> callback) override + { + checkEnabled(); + + auto request(makeRequest(path)); getDownloader()->enqueueDownload(request, - [success](const DownloadResult & result) { - success(result.data); - }, - [success, failure](std::exception_ptr exc) { + {[callback, this](std::future result) { try { - std::rethrow_exception(exc); + callback(result.get().data); } catch (DownloadError & e) { if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) - return success(0); - failure(exc); + return callback(std::shared_ptr()); + maybeDisable(); + callback.rethrow(); } catch (...) { - failure(exc); + callback.rethrow(); } - }); + }}); } }; diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index 5dee25308f7..7c9bc2b68ba 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -17,6 +17,7 @@ struct LegacySSHStore : public Store const Setting sshKey{this, "", "ssh-key", "path to an SSH private key"}; const Setting compress{this, false, "compress", "whether to compress the connection"}; const Setting remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"}; + const Setting remoteStore{this, "", "remote-store", "URI of the store on the remote system"}; // Hack for getting remote build log output. const Setting logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"}; @@ -27,6 +28,7 @@ struct LegacySSHStore : public Store FdSink to; FdSource from; int remoteVersion; + bool good = true; }; std::string host; @@ -41,7 +43,7 @@ struct LegacySSHStore : public Store , connections(make_ref>( std::max(1, (int) maxConnections), [this]() { return openConnection(); }, - [](const ref & r) { return true; } + [](const ref & r) { return r->good; } )) , master( host, @@ -56,7 +58,9 @@ struct LegacySSHStore : public Store ref openConnection() { auto conn = make_ref(); - conn->sshConn = master.startCommand(fmt("%s --serve --write", remoteProgram)); + conn->sshConn = master.startCommand( + fmt("%s --serve --write", remoteProgram) + + (remoteStore.get() == "" ? 
"" : " --store " + shellEscape(remoteStore.get()))); conn->to = FdSink(conn->sshConn->in.get()); conn->from = FdSource(conn->sshConn->out.get()); @@ -84,10 +88,9 @@ struct LegacySSHStore : public Store } void queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) override + Callback> callback) override { - sync2async>(success, failure, [&]() -> std::shared_ptr { + try { auto conn(connections->get()); debug("querying remote host '%s' for info on '%s'", host, path); @@ -97,7 +100,7 @@ struct LegacySSHStore : public Store auto info = std::make_shared(); conn->from >> info->path; - if (info->path.empty()) return nullptr; + if (info->path.empty()) return callback(nullptr); assert(path == info->path); PathSet references; @@ -116,8 +119,8 @@ struct LegacySSHStore : public Store auto s = readString(conn->from); assert(s == ""); - return info; - }); + callback(std::move(info)); + } catch (...) { callback.rethrow(); } } void addToStore(const ValidPathInfo & info, Source & source, @@ -128,18 +131,48 @@ struct LegacySSHStore : public Store auto conn(connections->get()); - conn->to - << cmdImportPaths - << 1; - copyNAR(source, conn->to); - conn->to - << exportMagic - << info.path - << info.references - << info.deriver - << 0 - << 0; - conn->to.flush(); + if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) { + + conn->to + << cmdAddToStoreNar + << info.path + << info.deriver + << info.narHash.to_string(Base16, false) + << info.references + << info.registrationTime + << info.narSize + << info.ultimate + << info.sigs + << info.ca; + try { + copyNAR(source, conn->to); + } catch (...) { + conn->good = false; + throw; + } + conn->to.flush(); + + } else { + + conn->to + << cmdImportPaths + << 1; + try { + copyNAR(source, conn->to); + } catch (...) 
{ + conn->good = false; + throw; + } + conn->to + << exportMagic + << info.path + << info.references + << info.deriver + << 0 + << 0; + conn->to.flush(); + + } if (readInt(conn->from) != 1) throw Error("failed to add path '%s' to remote host '%s', info.path, host"); @@ -154,28 +187,17 @@ struct LegacySSHStore : public Store copyNAR(conn->from, sink); } - PathSet queryAllValidPaths() override { unsupported(); } - - void queryReferrers(const Path & path, PathSet & referrers) override - { unsupported(); } - - PathSet queryDerivationOutputs(const Path & path) override - { unsupported(); } - - StringSet queryDerivationOutputNames(const Path & path) override - { unsupported(); } - Path queryPathFromHashPart(const string & hashPart) override - { unsupported(); } + { unsupported("queryPathFromHashPart"); } Path addToStore(const string & name, const Path & srcPath, bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair) override - { unsupported(); } + { unsupported("addToStore"); } Path addTextToStore(const string & name, const string & s, const PathSet & references, RepairFlag repair) override - { unsupported(); } + { unsupported("addTextToStore"); } BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) override @@ -209,25 +231,7 @@ struct LegacySSHStore : public Store } void ensurePath(const Path & path) override - { unsupported(); } - - void addTempRoot(const Path & path) override - { unsupported(); } - - void addIndirectRoot(const Path & path) override - { unsupported(); } - - Roots findRoots() override - { unsupported(); } - - void collectGarbage(const GCOptions & options, GCResults & results) override - { unsupported(); } - - ref getFSAccessor() override - { unsupported(); } - - void addSignatures(const Path & storePath, const StringSet & sigs) override - { unsupported(); } + { unsupported("ensurePath"); } void computeFSClosure(const PathSet & paths, PathSet & out, bool flipDirection = false, @@ -270,6 +274,12 @@ struct LegacySSHStore : public Store { auto conn(connections->get()); } + + unsigned int getProtocol() override + { + auto conn(connections->get()); + return conn->remoteVersion; + } }; static RegisterStoreImplementation regStore([]( diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index 2577e90aef2..b7001795be4 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -34,18 +34,14 @@ class LocalBinaryCacheStore : public BinaryCacheStore const std::string & data, const std::string & mimeType) override; - void getFile(const std::string & path, - std::function)> success, - std::function failure) override + void getFile(const std::string & path, Sink & sink) override { - sync2async>(success, failure, [&]() { - try { - return std::make_shared(readFile(binaryCacheDir + "/" + path)); - } catch (SysError & e) { - if (e.errNo == ENOENT) return std::shared_ptr(); - throw; - } - }); + try { + readFile(binaryCacheDir + "/" + path, sink); + } catch (SysError & e) { + if (e.errNo == ENOENT) + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path); + } } PathSet queryAllValidPaths() override diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index b63584f28a3..485fdd69193 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -450,7 +450,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0); 
if (eaSize < 0) { - if (errno != ENOTSUP) + if (errno != ENOTSUP && errno != ENODATA) throw SysError("querying extended attributes of '%s'", path); } else if (eaSize > 0) { std::vector eaBuf(eaSize); @@ -581,7 +581,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs) { - assert(info.ca == "" || info.isContentAddressed(*this)); + if (info.ca != "" && !info.isContentAddressed(*this)) + throw Error("cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", info.path); state.stmtRegisterValidPath.use() (info.path) @@ -628,17 +629,15 @@ uint64_t LocalStore::addValidPath(State & state, void LocalStore::queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) + Callback> callback) { - sync2async>(success, failure, [&]() { - + try { auto info = std::make_shared(); info->path = path; assertStorePath(path); - return retrySQLite>([&]() { + callback(retrySQLite>([&]() { auto state(_state.lock()); /* Get the path info. */ @@ -678,8 +677,9 @@ void LocalStore::queryPathInfoUncached(const Path & path, info->references.insert(useQueryReferences.getStr(0)); return info; - }); - }); + })); + + } catch (...) { callback.rethrow(); } } @@ -880,6 +880,12 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths, narInfo ? narInfo->fileSize : 0, info->narSize}; } catch (InvalidPath) { + } catch (SubstituterDisabled) { + } catch (Error & e) { + if (settings.tryFallback) + printError(e.what()); + else + throw; } } } @@ -975,7 +981,8 @@ const PublicKeys & LocalStore::getPublicKeys() void LocalStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) { - assert(info.narHash); + if (!info.narHash) + throw Error("cannot add path '%s' because it lacks a hash", info.path); if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys())) throw Error("cannot add path '%s' because it lacks a valid signature", info.path); @@ -1013,11 +1020,11 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, auto hashResult = hashSink.finish(); if (hashResult.first != info.narHash) - throw Error("hash mismatch importing path '%s'; expected hash '%s', got '%s'", + throw Error("hash mismatch importing path '%s';\n wanted: %s\n got: %s", info.path, info.narHash.to_string(), hashResult.first.to_string()); if (hashResult.second != info.narSize) - throw Error("size mismatch importing path '%s'; expected %s, got %s", + throw Error("size mismatch importing path '%s';\n wanted: %s\n got: %s", info.path, info.narSize, hashResult.second); autoGC(); @@ -1331,6 +1338,12 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store, } +unsigned int LocalStore::getProtocol() +{ + return PROTOCOL_VERSION; +} + + #if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL) static void makeMutable(const Path & path) diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 1209a06356f..fce963433a5 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -127,8 +127,7 @@ public: PathSet queryAllValidPaths() override; void queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) override; + Callback> callback) override; void queryReferrers(const Path & path, PathSet & referrers) override; @@ -210,6 +209,8 @@ public: void 
registerValidPaths(const ValidPathInfos & infos); + unsigned int getProtocol() override; + void vacuumDB(); /* Repair the contents of the given path by redownloading it using diff --git a/src/libstore/local.mk b/src/libstore/local.mk index a7279aa3939..89fc918c30f 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -6,7 +6,7 @@ libstore_DIR := $(d) libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) -libstore_LIBS = libutil libformat +libstore_LIBS = libutil libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread ifneq ($(OS), FreeBSD) @@ -18,7 +18,7 @@ libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb $(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox))) ifeq ($(ENABLE_S3), 1) - libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core + libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core endif ifeq ($(OS), SunOS) diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index a82aa4e9cfa..adcce026fa1 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -33,9 +33,11 @@ void Store::computeFSClosure(const PathSet & startPaths, state->pending++; } - queryPathInfo(path, - [&, path](ref info) { - // FIXME: calls to isValidPath() should be async + queryPathInfo(path, {[&, path](std::future> fut) { + // FIXME: calls to isValidPath() should be async + + try { + auto info = fut.get(); if (flipDirection) { @@ -75,14 +77,13 @@ void Store::computeFSClosure(const PathSet & startPaths, if (!--state->pending) done.notify_one(); } - }, - - [&, path](std::exception_ptr exc) { + } catch (...) { auto state(state_.lock()); - if (!state->exc) state->exc = exc; + if (!state->exc) state->exc = std::current_exception(); assert(state->pending); if (!--state->pending) done.notify_one(); - }); + }; + }}); }; for (auto & startPath : startPaths) diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 35403e5df56..32ad7f2b27f 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -31,6 +31,7 @@ create table if not exists NARs ( refs text, deriver text, sigs text, + ca text, timestamp integer not null, present integer not null, primary key (cache, hashPart), @@ -72,7 +73,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { auto state(_state.lock()); - Path dbPath = getCacheDir() + "/nix/binary-cache-v5.sqlite"; + Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite"; createDirs(dirOf(dbPath)); state->db = SQLite(dbPath); @@ -94,13 +95,13 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache state->insertNAR.create(state->db, "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, " - "narSize, refs, deriver, sigs, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); + "narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); state->insertMissingNAR.create(state->db, "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)"); state->queryNAR.create(state->db, - "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); + "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) 
or (present = 1 and timestamp > ?))"); /* Periodically purge expired entries from the database. */ retrySQLite([&]() { @@ -189,27 +190,28 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache if (!queryNAR.next()) return {oUnknown, 0}; - if (!queryNAR.getInt(13)) + if (!queryNAR.getInt(0)) return {oInvalid, 0}; auto narInfo = make_ref(); - auto namePart = queryNAR.getStr(2); + auto namePart = queryNAR.getStr(1); narInfo->path = cache.storeDir + "/" + hashPart + (namePart.empty() ? "" : "-" + namePart); - narInfo->url = queryNAR.getStr(3); - narInfo->compression = queryNAR.getStr(4); - if (!queryNAR.isNull(5)) - narInfo->fileHash = Hash(queryNAR.getStr(5)); - narInfo->fileSize = queryNAR.getInt(6); - narInfo->narHash = Hash(queryNAR.getStr(7)); - narInfo->narSize = queryNAR.getInt(8); - for (auto & r : tokenizeString(queryNAR.getStr(9), " ")) + narInfo->url = queryNAR.getStr(2); + narInfo->compression = queryNAR.getStr(3); + if (!queryNAR.isNull(4)) + narInfo->fileHash = Hash(queryNAR.getStr(4)); + narInfo->fileSize = queryNAR.getInt(5); + narInfo->narHash = Hash(queryNAR.getStr(6)); + narInfo->narSize = queryNAR.getInt(7); + for (auto & r : tokenizeString(queryNAR.getStr(8), " ")) narInfo->references.insert(cache.storeDir + "/" + r); - if (!queryNAR.isNull(10)) - narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10); - for (auto & sig : tokenizeString(queryNAR.getStr(11), " ")) + if (!queryNAR.isNull(9)) + narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(9); + for (auto & sig : tokenizeString(queryNAR.getStr(10), " ")) narInfo->sigs.insert(sig); + narInfo->ca = queryNAR.getStr(11); return {oValid, narInfo}; }); @@ -243,6 +245,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache (concatStringsSep(" ", info->shortRefs())) (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "") (concatStringsSep(" ", info->sigs)) + (info->ca) (time(0)).exec(); } else { diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 7840167d777..991512f2179 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -104,8 +104,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, *.app/Contents/Resources/\*.lproj seem to be the only paths affected. See https://github.com/NixOS/nix/issues/1443 for more discussion. */ - if (std::regex_search(path, std::regex("\\.app/Contents/PkgInfo$")) || - std::regex_search(path, std::regex("\\.app/Contents/Resources/.+\\.lproj$"))) + if (std::regex_search(path, std::regex("\\.app/Contents/.+$"))) { debug(format("'%1%' is not allowed to be linked in macOS") % path); return; diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc new file mode 100644 index 00000000000..dc328648273 --- /dev/null +++ b/src/libstore/parsed-derivations.cc @@ -0,0 +1,111 @@ +#include "parsed-derivations.hh" + +namespace nix { + +ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv) + : drvPath(drvPath), drv(drv) +{ + /* Parse the __json attribute, if any. 
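
For readers new to structured attributes: the '__json' environment entry parsed here carries a JSON object, and the accessors defined below fall back to plain environment strings when it is absent ('1' for Booleans, whitespace-separated words for lists). A hypothetical payload, with attribute names taken from this file and values invented:

    // Hypothetical __json value consumed by ParsedDerivation. Without it,
    // the same data would be the env strings preferLocalBuild = "1" and
    // requiredSystemFeatures = "kvm big-parallel".
    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        auto structuredAttrs = nlohmann::json::parse(R"({
            "preferLocalBuild": true,
            "requiredSystemFeatures": ["kvm", "big-parallel"]
        })");

        std::cout << structuredAttrs["preferLocalBuild"].get<bool>() << "\n";
        for (auto & f : structuredAttrs["requiredSystemFeatures"])
            std::cout << f.get<std::string>() << "\n";
    }
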
*/ + auto jsonAttr = drv.env.find("__json"); + if (jsonAttr != drv.env.end()) { + try { + structuredAttrs = nlohmann::json::parse(jsonAttr->second); + } catch (std::exception & e) { + throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what()); + } + } +} + +std::experimental::optional ParsedDerivation::getStringAttr(const std::string & name) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return {}; + else { + if (!i->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath); + return i->get(); + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return {}; + else + return i->second; + } +} + +bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return def; + else { + if (!i->is_boolean()) + throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath); + return i->get(); + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return def; + else + return i->second == "1"; + } +} + +std::experimental::optional ParsedDerivation::getStringsAttr(const std::string & name) const +{ + if (structuredAttrs) { + auto i = structuredAttrs->find(name); + if (i == structuredAttrs->end()) + return {}; + else { + if (!i->is_array()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + Strings res; + for (auto j = i->begin(); j != i->end(); ++j) { + if (!j->is_string()) + throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath); + res.push_back(j->get()); + } + return res; + } + } else { + auto i = drv.env.find(name); + if (i == drv.env.end()) + return {}; + else + return tokenizeString(i->second); + } +} + +StringSet ParsedDerivation::getRequiredSystemFeatures() const +{ + StringSet res; + for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) + res.insert(i); + return res; +} + +bool ParsedDerivation::canBuildLocally() const +{ + if (drv.platform != settings.thisSystem.get() + && !settings.extraPlatforms.get().count(drv.platform) + && !drv.isBuiltin()) + return false; + + for (auto & feature : getRequiredSystemFeatures()) + if (!settings.systemFeatures.get().count(feature)) return false; + + return true; +} + +bool ParsedDerivation::willBuildLocally() const +{ + return getBoolAttr("preferLocalBuild") && canBuildLocally(); +} + +} diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh new file mode 100644 index 00000000000..0a82c146172 --- /dev/null +++ b/src/libstore/parsed-derivations.hh @@ -0,0 +1,35 @@ +#include "derivations.hh" + +#include + +namespace nix { + +class ParsedDerivation +{ + Path drvPath; + BasicDerivation & drv; + std::experimental::optional structuredAttrs; + +public: + + ParsedDerivation(const Path & drvPath, BasicDerivation & drv); + + const std::experimental::optional & getStructuredAttrs() const + { + return structuredAttrs; + } + + std::experimental::optional getStringAttr(const std::string & name) const; + + bool getBoolAttr(const std::string & name, bool def = false) const; + + std::experimental::optional getStringsAttr(const std::string & name) const; + + StringSet getRequiredSystemFeatures() const; + + bool canBuildLocally() const; + + bool willBuildLocally() const; +}; + +} diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index 
4a607b58450..4c6af567ae6 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -157,6 +157,29 @@ void deleteGenerations(const Path & profile, const std::set & gens } } +void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun) +{ + PathLocks lock; + lockProfile(lock, profile); + + int curGen; + bool fromCurGen = false; + Generations gens = findGenerations(profile, curGen); + for (auto i = gens.rbegin(); i != gens.rend(); ++i) { + if (i->number == curGen) { + fromCurGen = true; + max--; + continue; + } + if (fromCurGen) { + if (max) { + max--; + continue; + } + deleteGeneration2(profile, i->number, dryRun); + } + } +} void deleteOldGenerations(const Path & profile, bool dryRun) { diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh index 1d4e6d3037d..5fa1533de31 100644 --- a/src/libstore/profiles.hh +++ b/src/libstore/profiles.hh @@ -39,6 +39,8 @@ void deleteGeneration(const Path & profile, unsigned int gen); void deleteGenerations(const Path & profile, const std::set & gensToDelete, bool dryRun); +void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun); + void deleteOldGenerations(const Path & profile, bool dryRun); void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun); diff --git a/src/libstore/references.cc b/src/libstore/references.cc index ba9f18b9ca5..5b7eb1f846a 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -13,7 +13,7 @@ namespace nix { static unsigned int refLength = 32; /* characters */ -static void search(const unsigned char * s, unsigned int len, +static void search(const unsigned char * s, size_t len, StringSet & hashes, StringSet & seen) { static bool initialised = false; @@ -25,7 +25,7 @@ static void search(const unsigned char * s, unsigned int len, initialised = true; } - for (unsigned int i = 0; i + refLength <= len; ) { + for (size_t i = 0; i + refLength <= len; ) { int j; bool match = true; for (j = refLength - 1; j >= 0; --j) @@ -73,7 +73,7 @@ void RefScanSink::operator () (const unsigned char * data, size_t len) search(data, len, hashes, seen); - unsigned int tailLen = len <= refLength ? len : refLength; + size_t tailLen = len <= refLength ? len : refLength; tail = string(tail, tail.size() < refLength - tailLen ? 
0 : tail.size() - (refLength - tailLen)) + string((const char *) data + len - tailLen, tailLen); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 080cef93d21..def140cfbe1 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -7,6 +7,7 @@ #include "globals.hh" #include "derivations.hh" #include "pool.hh" +#include "finally.hh" #include #include @@ -160,7 +161,8 @@ void RemoteStore::initConnection(Connection & conn) if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) conn.to << false; - conn.processStderr(); + auto ex = conn.processStderr(); + if (ex) std::rethrow_exception(ex); } catch (Error & e) { throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what()); @@ -187,28 +189,75 @@ void RemoteStore::setOptions(Connection & conn) << settings.useSubstitutes; if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) { - auto overrides = settings.getSettings(true); + std::map overrides; + globalConfig.getSettings(overrides, true); conn.to << overrides.size(); for (auto & i : overrides) - conn.to << i.first << i.second; + conn.to << i.first << i.second.value; } - conn.processStderr(); + auto ex = conn.processStderr(); + if (ex) std::rethrow_exception(ex); +} + + +/* A wrapper around Pool::Handle that marks + the connection as bad (causing it to be closed) if a non-daemon + exception is thrown before the handle is closed. Such an exception + causes a deviation from the expected protocol and therefore a + desynchronization between the client and daemon. */ +struct ConnectionHandle +{ + Pool::Handle handle; + bool daemonException = false; + + ConnectionHandle(Pool::Handle && handle) + : handle(std::move(handle)) + { } + + ConnectionHandle(ConnectionHandle && h) + : handle(std::move(h.handle)) + { } + + ~ConnectionHandle() + { + if (!daemonException && std::uncaught_exception()) { + handle.markBad(); + debug("closing daemon connection because of an exception"); + } + } + + RemoteStore::Connection * operator -> () { return &*handle; } + + void processStderr(Sink * sink = 0, Source * source = 0) + { + auto ex = handle->processStderr(sink, source); + if (ex) { + daemonException = true; + std::rethrow_exception(ex); + } + } +}; + + +ConnectionHandle RemoteStore::getConnection() +{ + return ConnectionHandle(connections->get()); } bool RemoteStore::isValidPathUncached(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopIsValidPath << path; - conn->processStderr(); + conn.processStderr(); return readInt(conn->from); } PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubstitute) { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) @@ -216,7 +265,7 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe return res; } else { conn->to << wopQueryValidPaths << paths; - conn->processStderr(); + conn.processStderr(); return readStorePaths(*this, conn->from); } } @@ -224,27 +273,27 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe PathSet RemoteStore::queryAllValidPaths() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryAllValidPaths; - conn->processStderr(); + conn.processStderr(); return readStorePaths(*this, conn->from); } PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths) { - auto conn(connections->get()); + auto conn(getConnection()); if 
(GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { PathSet res; for (auto & i : paths) { conn->to << wopHasSubstitutes << i; - conn->processStderr(); + conn.processStderr(); if (readInt(conn->from)) res.insert(i); } return res; } else { conn->to << wopQuerySubstitutablePaths << paths; - conn->processStderr(); + conn.processStderr(); return readStorePaths(*this, conn->from); } } @@ -255,14 +304,14 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, { if (paths.empty()) return; - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) { for (auto & i : paths) { SubstitutablePathInfo info; conn->to << wopQuerySubstitutablePathInfo << i; - conn->processStderr(); + conn.processStderr(); unsigned int reply = readInt(conn->from); if (reply == 0) continue; info.deriver = readString(conn->from); @@ -276,7 +325,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, } else { conn->to << wopQuerySubstitutablePathInfos << paths; - conn->processStderr(); + conn.processStderr(); size_t count = readNum(conn->from); for (size_t n = 0; n < count; n++) { Path path = readStorePath(*this, conn->from); @@ -293,47 +342,49 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths, void RemoteStore::queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) -{ - sync2async>(success, failure, [&]() { - auto conn(connections->get()); - conn->to << wopQueryPathInfo << path; - try { - conn->processStderr(); - } catch (Error & e) { - // Ugly backwards compatibility hack. - if (e.msg().find("is not valid") != std::string::npos) - throw InvalidPath(e.what()); - throw; - } - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { - bool valid; conn->from >> valid; - if (!valid) throw InvalidPath(format("path '%s' is not valid") % path); - } - auto info = std::make_shared(); - info->path = path; - info->deriver = readString(conn->from); - if (info->deriver != "") assertStorePath(info->deriver); - info->narHash = Hash(readString(conn->from), htSHA256); - info->references = readStorePaths(*this, conn->from); - conn->from >> info->registrationTime >> info->narSize; - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { - conn->from >> info->ultimate; - info->sigs = readStrings(conn->from); - conn->from >> info->ca; + Callback> callback) +{ + try { + std::shared_ptr info; + { + auto conn(getConnection()); + conn->to << wopQueryPathInfo << path; + try { + conn.processStderr(); + } catch (Error & e) { + // Ugly backwards compatibility hack. + if (e.msg().find("is not valid") != std::string::npos) + throw InvalidPath(e.what()); + throw; + } + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { + bool valid; conn->from >> valid; + if (!valid) throw InvalidPath(format("path '%s' is not valid") % path); + } + info = std::make_shared(); + info->path = path; + info->deriver = readString(conn->from); + if (info->deriver != "") assertStorePath(info->deriver); + info->narHash = Hash(readString(conn->from), htSHA256); + info->references = readStorePaths(*this, conn->from); + conn->from >> info->registrationTime >> info->narSize; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) { + conn->from >> info->ultimate; + info->sigs = readStrings(conn->from); + conn->from >> info->ca; + } } - return info; - }); + callback(std::move(info)); + } catch (...) 
{ callback.rethrow(); } } void RemoteStore::queryReferrers(const Path & path, PathSet & referrers) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryReferrers << path; - conn->processStderr(); + conn.processStderr(); PathSet referrers2 = readStorePaths(*this, conn->from); referrers.insert(referrers2.begin(), referrers2.end()); } @@ -341,36 +392,36 @@ void RemoteStore::queryReferrers(const Path & path, PathSet RemoteStore::queryValidDerivers(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryValidDerivers << path; - conn->processStderr(); + conn.processStderr(); return readStorePaths(*this, conn->from); } PathSet RemoteStore::queryDerivationOutputs(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryDerivationOutputs << path; - conn->processStderr(); + conn.processStderr(); return readStorePaths(*this, conn->from); } PathSet RemoteStore::queryDerivationOutputNames(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryDerivationOutputNames << path; - conn->processStderr(); + conn.processStderr(); return readStrings(conn->from); } Path RemoteStore::queryPathFromHashPart(const string & hashPart) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopQueryPathFromHashPart << hashPart; - conn->processStderr(); + conn.processStderr(); Path path = readString(conn->from); if (!path.empty()) assertStorePath(path); return path; @@ -380,7 +431,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart) void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { conn->to << wopImportPaths; @@ -399,7 +450,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, ; }); - conn->processStderr(0, source2.get()); + conn.processStderr(0, source2.get()); auto importedPaths = readStorePaths(*this, conn->from); assert(importedPaths.size() <= 1); @@ -411,8 +462,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, << info.references << info.registrationTime << info.narSize << info.ultimate << info.sigs << info.ca << repair << !checkSigs; - copyNAR(source, conn->to); - conn->processStderr(); + bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21; + if (!tunnel) copyNAR(source, conn->to); + conn.processStderr(0, tunnel ? &source : nullptr); } } @@ -422,7 +474,7 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - auto conn(connections->get()); + auto conn(getConnection()); Path srcPath(absPath(_srcPath)); @@ -435,16 +487,18 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath, conn->to.written = 0; conn->to.warn = true; connections->incCapacity(); - dumpPath(srcPath, conn->to, filter); - connections->decCapacity(); + { + Finally cleanup([&]() { connections->decCapacity(); }); + dumpPath(srcPath, conn->to, filter); + } conn->to.warn = false; - conn->processStderr(); + conn.processStderr(); } catch (SysError & e) { /* Daemon closed while we were sending the path. Probably OOM or I/O error. 
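Illustrative aside: the Finally helper included above is used so that connections->decCapacity() runs even when dumpPath() throws. A self-contained sketch of the same scope-guard idiom (ScopeGuard is a stand-in here, not Nix's Finally):

    #include <functional>
    #include <stdexcept>

    struct ScopeGuard
    {
        std::function<void()> fun;
        ~ScopeGuard() { fun(); }   // runs on normal exit and during unwinding
    };

    void example()
    {
        ScopeGuard cleanup{[]() { /* e.g. release a capacity counter */ }};
        throw std::runtime_error("simulated I/O error");  // cleanup still runs
    }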
*/ if (e.errNo == EPIPE) try { - conn->processStderr(); + conn.processStderr(); } catch (EndOfFile & e) { } throw; } @@ -458,17 +512,17 @@ Path RemoteStore::addTextToStore(const string & name, const string & s, { if (repair) throw Error("repairing is not supported when building through the Nix daemon"); - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddTextToStore << name << s << references; - conn->processStderr(); + conn.processStderr(); return readStorePath(*this, conn->from); } void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopBuildPaths; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) { conn->to << drvPaths; @@ -487,7 +541,7 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) drvPaths2.insert(string(i, 0, i.find('!'))); conn->to << drvPaths2; } - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } @@ -495,9 +549,9 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode) BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv, BuildMode buildMode) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopBuildDerivation << drvPath << drv << buildMode; - conn->processStderr(); + conn.processStderr(); BuildResult res; unsigned int status; conn->from >> status >> res.errorMsg; @@ -508,45 +562,45 @@ BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDeriva void RemoteStore::ensurePath(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopEnsurePath << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::addTempRoot(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddTempRoot << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::addIndirectRoot(const Path & path) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddIndirectRoot << path; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } void RemoteStore::syncWithGC() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopSyncWithGC; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } Roots RemoteStore::findRoots() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopFindRoots; - conn->processStderr(); + conn.processStderr(); size_t count = readNum(conn->from); Roots result; while (count--) { @@ -560,7 +614,7 @@ Roots RemoteStore::findRoots() void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness @@ -568,7 +622,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) /* removed options */ << 0 << 0 << 0; - conn->processStderr(); + conn.processStderr(); results.paths = readStrings(conn->from); results.bytesFreed = readLongLong(conn->from); @@ -583,27 +637,27 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results) void RemoteStore::optimiseStore() { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopOptimiseStore; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } bool 
RemoteStore::verifyStore(bool checkContents, RepairFlag repair) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopVerifyStore << checkContents << repair; - conn->processStderr(); + conn.processStderr(); return readInt(conn->from); } void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs) { - auto conn(connections->get()); + auto conn(getConnection()); conn->to << wopAddSignatures << storePath << sigs; - conn->processStderr(); + conn.processStderr(); readInt(conn->from); } @@ -613,13 +667,13 @@ void RemoteStore::queryMissing(const PathSet & targets, unsigned long long & downloadSize, unsigned long long & narSize) { { - auto conn(connections->get()); + auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19) // Don't hold the connection handle in the fallback case // to prevent a deadlock. goto fallback; conn->to << wopQueryMissing << targets; - conn->processStderr(); + conn.processStderr(); willBuild = readStorePaths(*this, conn->from); willSubstitute = readStorePaths(*this, conn->from); unknown = readStorePaths(*this, conn->from); @@ -634,8 +688,15 @@ void RemoteStore::queryMissing(const PathSet & targets, void RemoteStore::connect() +{ + auto conn(getConnection()); +} + + +unsigned int RemoteStore::getProtocol() { auto conn(connections->get()); + return conn->daemonVersion; } @@ -672,7 +733,7 @@ static Logger::Fields readFields(Source & from) } -void RemoteStore::Connection::processStderr(Sink * sink, Source * source) +std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source) { to.flush(); @@ -697,7 +758,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) else if (msg == STDERR_ERROR) { string error = readString(from); unsigned int status = readInt(from); - throw Error(status, error); + return std::make_exception_ptr(Error(status, error)); } else if (msg == STDERR_NEXT) @@ -731,6 +792,8 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source) else throw Error("got unknown message type %x from Nix daemon", msg); } + + return nullptr; } static std::string uriScheme = "unix://"; diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 95fa59a2069..4f554b5980e 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -14,6 +14,7 @@ class Pid; struct FdSink; struct FdSource; template class Pool; +struct ConnectionHandle; /* FIXME: RemoteStore is a misnomer - should be something like @@ -40,8 +41,7 @@ public: PathSet queryAllValidPaths() override; void queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) override; + Callback> callback) override; void queryReferrers(const Path & path, PathSet & referrers) override; @@ -98,12 +98,15 @@ public: void connect() override; + unsigned int getProtocol() override; + void flushBadConnections(); protected: struct Connection { + AutoCloseFD fd; FdSink to; FdSource from; unsigned int daemonVersion; @@ -111,7 +114,7 @@ protected: virtual ~Connection(); - void processStderr(Sink * sink = 0, Source * source = 0); + std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0); }; ref openConnectionWrapper(); @@ -124,6 +127,10 @@ protected: virtual void setOptions(Connection & conn); + ConnectionHandle getConnection(); + + friend struct ConnectionHandle; + private: std::atomic_bool failed{false}; @@ -141,11 +148,6 @@ public: private: - struct Connection : RemoteStore::Connection - { - AutoCloseFD fd; - }; - ref 
openConnection() override; std::experimental::optional path; }; diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 23af452094c..51de89e0d92 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -17,13 +17,15 @@ #include #include #include +#include #include -#include -#include #include #include #include #include +#include + +using namespace Aws::Transfer; namespace nix { @@ -80,8 +82,8 @@ static void initAWS() }); } -S3Helper::S3Helper(const std::string & profile, const std::string & region) - : config(makeConfig(region)) +S3Helper::S3Helper(const string & profile, const string & region, const string & scheme, const string & endpoint) + : config(makeConfig(region, scheme, endpoint)) , client(make_ref( profile == "" ? std::dynamic_pointer_cast( @@ -95,7 +97,7 @@ S3Helper::S3Helper(const std::string & profile, const std::string & region) #else Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, #endif - false)) + endpoint.empty())) { } @@ -112,11 +114,17 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy } }; -ref S3Helper::makeConfig(const string & region) +ref S3Helper::makeConfig(const string & region, const string & scheme, const string & endpoint) { initAWS(); auto res = make_ref(); res->region = region; + if (!scheme.empty()) { + res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str()); + } + if (!endpoint.empty()) { + res->endpointOverride = endpoint; + } res->requestTimeoutMs = 600 * 1000; res->retryStrategy = std::make_shared(); res->caFile = settings.caFile; @@ -146,10 +154,8 @@ S3Helper::DownloadResult S3Helper::getObject( auto result = checkAws(fmt("AWS error fetching '%s'", key), client->GetObject(request)); - res.data = decodeContent( - result.GetContentEncoding(), - make_ref( - dynamic_cast(result.GetBody()).str())); + res.data = decompress(result.GetContentEncoding(), + dynamic_cast(result.GetBody()).str()); } catch (S3Error & e) { if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw; @@ -166,9 +172,15 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { const Setting profile{this, "", "profile", "The name of the AWS configuration profile to use."}; const Setting region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}}; + const Setting scheme{this, "", "scheme", "The scheme to use for S3 requests, https by default."}; + const Setting endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."}; const Setting narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"}; const Setting lsCompression{this, "", "ls-compression", "compression method for .ls files"}; const Setting logCompression{this, "", "log-compression", "compression method for log/* files"}; + const Setting multipartUpload{ + this, false, "multipart-upload", "whether to use multi-part uploads"}; + const Setting bufferSize{ + this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"}; std::string bucketName; @@ -180,7 +192,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore const Params & params, const std::string & bucketName) : S3BinaryCacheStore(params) , bucketName(bucketName) - , s3Helper(profile, region) + , s3Helper(profile, region, scheme, endpoint) { diskCache = getNarInfoDiskCache(); } @@ -194,32 +206,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore { if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) { - /* Create the bucket if it 
doesn't already exists. */ - // FIXME: HeadBucket would be more appropriate, but doesn't return - // an easily parsed 404 message. - auto res = s3Helper.client->GetBucketLocation( - Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName)); - - if (!res.IsSuccess()) { - if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET) - throw Error(format("AWS error checking bucket '%s': %s") % bucketName % res.GetError().GetMessage()); - - printInfo("creating S3 bucket '%s'...", bucketName); - - // Stupid S3 bucket locations. - auto bucketConfig = Aws::S3::Model::CreateBucketConfiguration(); - if (s3Helper.config->region != "us-east-1") - bucketConfig.SetLocationConstraint( - Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName( - s3Helper.config->region)); - - checkAws(format("AWS error creating bucket '%s'") % bucketName, - s3Helper.client->CreateBucket( - Aws::S3::Model::CreateBucketRequest() - .WithBucket(bucketName) - .WithCreateBucketConfiguration(bucketConfig))); - } - BinaryCacheStore::init(); diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority); @@ -267,40 +253,100 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore return true; } + std::shared_ptr transferManager; + std::once_flag transferManagerCreated; + void uploadFile(const std::string & path, const std::string & data, const std::string & mimeType, const std::string & contentEncoding) { - auto request = - Aws::S3::Model::PutObjectRequest() - .WithBucket(bucketName) - .WithKey(path); + auto stream = std::make_shared(data); - request.SetContentType(mimeType); + auto maxThreads = std::thread::hardware_concurrency(); + + static std::shared_ptr + executor = std::make_shared(maxThreads); + + std::call_once(transferManagerCreated, [&]() + { + if (multipartUpload) { + TransferManagerConfiguration transferConfig(executor.get()); + + transferConfig.s3Client = s3Helper.client; + transferConfig.bufferSize = bufferSize; + + transferConfig.uploadProgressCallback = + [](const TransferManager *transferManager, + const std::shared_ptr + &transferHandle) + { + //FIXME: find a way to properly abort the multipart upload. 
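Illustrative aside: with the new scheme, endpoint, multipart-upload and buffer-size settings above, an S3-compatible store can be configured entirely through store-URI parameters. A sketch, in which the bucket name and endpoint host are made up:

    #include "store-api.hh"

    void openExampleCache()
    {
        // Hypothetical MinIO-style server; the parameter names come from the
        // Setting declarations above.
        auto store = nix::openStore(
            "s3://example-bucket"
            "?scheme=http"
            "&endpoint=minio.example.org:9000"
            "&multipart-upload=true"
            "&buffer-size=10485760");
        (void) store;
    }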
+ //checkInterrupt(); + debug("upload progress ('%s'): '%d' of '%d' bytes", + transferHandle->GetKey(), + transferHandle->GetBytesTransferred(), + transferHandle->GetBytesTotalSize()); + }; + + transferManager = TransferManager::Create(transferConfig); + } + }); - if (contentEncoding != "") - request.SetContentEncoding(contentEncoding); + auto now1 = std::chrono::steady_clock::now(); - auto stream = std::make_shared(data); + if (transferManager) { - request.SetBody(stream); + if (contentEncoding != "") + throw Error("setting a content encoding is not supported with S3 multi-part uploads"); - stats.put++; - stats.putBytes += data.size(); + std::shared_ptr transferHandle = + transferManager->UploadFile( + stream, bucketName, path, mimeType, + Aws::Map(), + nullptr /*, contentEncoding */); - auto now1 = std::chrono::steady_clock::now(); + transferHandle->WaitUntilFinished(); + + if (transferHandle->GetStatus() == TransferStatus::FAILED) + throw Error("AWS error: failed to upload 's3://%s/%s': %s", + bucketName, path, transferHandle->GetLastError().GetMessage()); + + if (transferHandle->GetStatus() != TransferStatus::COMPLETED) + throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state", + bucketName, path); + + } else { + + auto request = + Aws::S3::Model::PutObjectRequest() + .WithBucket(bucketName) + .WithKey(path); + + request.SetContentType(mimeType); + + if (contentEncoding != "") + request.SetContentEncoding(contentEncoding); - auto result = checkAws(format("AWS error uploading '%s'") % path, - s3Helper.client->PutObject(request)); + auto stream = std::make_shared(data); + + request.SetBody(stream); + + auto result = checkAws(fmt("AWS error uploading '%s'", path), + s3Helper.client->PutObject(request)); + } auto now2 = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast(now2 - now1).count(); + auto duration = + std::chrono::duration_cast(now2 - now1) + .count(); - printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") - % bucketName % path % data.size() % duration); + printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") % + bucketName % path % data.size() % duration); stats.putTimeMs += duration; + stats.putBytes += data.size(); + stats.put++; } void upsertFile(const std::string & path, const std::string & data, @@ -316,24 +362,23 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore uploadFile(path, data, mimeType, ""); } - void getFile(const std::string & path, - std::function)> success, - std::function failure) override + void getFile(const std::string & path, Sink & sink) override { - sync2async>(success, failure, [&]() { - stats.get++; + stats.get++; - auto res = s3Helper.getObject(bucketName, path); + // FIXME: stream output to sink. + auto res = s3Helper.getObject(bucketName, path); - stats.getBytes += res.data ? res.data->size() : 0; - stats.getTimeMs += res.durationMs; + stats.getBytes += res.data ? 
res.data->size() : 0; + stats.getTimeMs += res.durationMs; - if (res.data) - printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", - bucketName, path, res.data->size(), res.durationMs); + if (res.data) { + printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", + bucketName, path, res.data->size(), res.durationMs); - return res.data; - }); + sink((unsigned char *) res.data->data(), res.data->size()); + } else + throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri()); } PathSet queryAllValidPaths() override diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh index 4f996400343..ef5f23d0f25 100644 --- a/src/libstore/s3.hh +++ b/src/libstore/s3.hh @@ -14,9 +14,9 @@ struct S3Helper ref config; ref client; - S3Helper(const std::string & profile, const std::string & region); + S3Helper(const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint); - ref makeConfig(const std::string & region); + ref makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint); struct DownloadResult { diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh index f67d1e2580a..9fae6d5349f 100644 --- a/src/libstore/serve-protocol.hh +++ b/src/libstore/serve-protocol.hh @@ -5,7 +5,7 @@ namespace nix { #define SERVE_MAGIC_1 0x390c9deb #define SERVE_MAGIC_2 0x5452eecb -#define SERVE_PROTOCOL_VERSION 0x204 +#define SERVE_PROTOCOL_VERSION 0x205 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) @@ -18,6 +18,7 @@ typedef enum { cmdBuildPaths = 6, cmdQueryClosure = 7, cmdBuildDerivation = 8, + cmdAddToStoreNar = 9, } ServeCommand; } diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 42d40e71d8b..a061d64f36d 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -10,6 +10,7 @@ namespace nix { [[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs) { int err = sqlite3_errcode(db); + int exterr = sqlite3_extended_errcode(db); auto path = sqlite3_db_filename(db, nullptr); if (!path) path = "(in-memory)"; @@ -21,7 +22,7 @@ namespace nix { : fmt("SQLite database '%s' is busy", path)); } else - throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(err), path); + throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path); } SQLite::SQLite(const Path & path) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 033c580936a..5e0e44935cc 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -4,8 +4,9 @@ namespace nix { SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD) : host(host) + , fakeSSH(host == "localhost") , keyFile(keyFile) - , useMaster(useMaster) + , useMaster(useMaster && !fakeSSH) , compress(compress) , logFD(logFD) { @@ -45,12 +46,19 @@ std::unique_ptr SSHMaster::startCommand(const std::string if (logFD != -1 && dup2(logFD, STDERR_FILENO) == -1) throw SysError("duping over stderr"); - Strings args = { "ssh", host.c_str(), "-x", "-a" }; - addCommonSSHOpts(args); - if (socketPath != "") - args.insert(args.end(), {"-S", socketPath}); - if (verbosity >= lvlChatty) - args.push_back("-v"); + Strings args; + + if (fakeSSH) { + args = { "bash", "-c" }; + } else { + args = { "ssh", host.c_str(), "-x", "-a" }; + addCommonSSHOpts(args); + if (socketPath != "") + args.insert(args.end(), {"-S", socketPath}); + if (verbosity >= lvlChatty) + args.push_back("-v"); + } + 
args.push_back(command); execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 1268e6d0005..4f0f0bd29f9 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -10,6 +10,7 @@ class SSHMaster private: const std::string host; + bool fakeSSH; const std::string keyFile; const bool useMaster; const bool compress; diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 1a0d12ca78c..c13ff11564e 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -253,6 +253,8 @@ std::string Store::getUri() bool Store::isValidPath(const Path & storePath) { + assertStorePath(storePath); + auto hashPart = storePathToHash(storePath); { @@ -303,21 +305,23 @@ ref Store::queryPathInfo(const Path & storePath) std::promise> promise; queryPathInfo(storePath, - [&](ref info) { - promise.set_value(info); - }, - [&](std::exception_ptr exc) { - promise.set_exception(exc); - }); + {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (...) { + promise.set_exception(std::current_exception()); + } + }}); return promise.get_future().get(); } void Store::queryPathInfo(const Path & storePath, - std::function)> success, - std::function failure) + Callback> callback) { + assertStorePath(storePath); + auto hashPart = storePathToHash(storePath); try { @@ -328,7 +332,7 @@ void Store::queryPathInfo(const Path & storePath, stats.narInfoReadAverted++; if (!*res) throw InvalidPath(format("path '%s' is not valid") % storePath); - return success(ref(*res)); + return callback(ref(*res)); } } @@ -344,35 +348,36 @@ void Store::queryPathInfo(const Path & storePath, (res.second->path != storePath && storePathToName(storePath) != "")) throw InvalidPath(format("path '%s' is not valid") % storePath); } - return success(ref(res.second)); + return callback(ref(res.second)); } } - } catch (std::exception & e) { - return callFailure(failure); - } + } catch (...) { return callback.rethrow(); } queryPathInfoUncached(storePath, - [this, storePath, hashPart, success, failure](std::shared_ptr info) { + {[this, storePath, hashPart, callback](std::future> fut) { - if (diskCache) - diskCache->upsertNarInfo(getUri(), hashPart, info); + try { + auto info = fut.get(); - { - auto state_(state.lock()); - state_->pathInfoCache.upsert(hashPart, info); - } + if (diskCache) + diskCache->upsertNarInfo(getUri(), hashPart, info); - if (!info - || (info->path != storePath && storePathToName(storePath) != "")) - { - stats.narInfoMissing++; - return failure(std::make_exception_ptr(InvalidPath(format("path '%s' is not valid") % storePath))); - } + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, info); + } - callSuccess(success, failure, ref(info)); + if (!info + || (info->path != storePath && storePathToName(storePath) != "")) + { + stats.narInfoMissing++; + throw InvalidPath("path '%s' is not valid", storePath); + } - }, failure); + callback(ref(info)); + } catch (...) 
{ callback.rethrow(); } + }}); } @@ -392,26 +397,19 @@ PathSet Store::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubsti auto doQuery = [&](const Path & path ) { checkInterrupt(); - queryPathInfo(path, - [path, &state_, &wakeup](ref info) { - auto state(state_.lock()); + queryPathInfo(path, {[path, &state_, &wakeup](std::future> fut) { + auto state(state_.lock()); + try { + auto info = fut.get(); state->valid.insert(path); - assert(state->left); - if (!--state->left) - wakeup.notify_one(); - }, - [path, &state_, &wakeup](std::exception_ptr exc) { - auto state(state_.lock()); - try { - std::rethrow_exception(exc); - } catch (InvalidPath &) { - } catch (...) { - state->exc = exc; - } - assert(state->left); - if (!--state->left) - wakeup.notify_one(); - }); + } catch (InvalidPath &) { + } catch (...) { + state->exc = std::current_exception(); + } + assert(state->left); + if (!--state->left) + wakeup.notify_one(); + }}); }; for (auto & path : paths) @@ -564,10 +562,10 @@ void Store::buildPaths(const PathSet & paths, BuildMode buildMode) { for (auto & path : paths) if (isDerivation(path)) - unsupported(); + unsupported("buildPaths"); if (queryValidPaths(paths).size() != paths.size()) - unsupported(); + unsupported("buildPaths"); } @@ -590,15 +588,19 @@ void copyStorePath(ref srcStore, ref dstStore, uint64_t total = 0; - // FIXME -#if 0 if (!info->narHash) { + StringSink sink; + srcStore->narFromPath({storePath}, sink); auto info2 = make_ref(*info); info2->narHash = hashString(htSHA256, *sink.s); if (!info->narSize) info2->narSize = sink.s->size(); + if (info->ultimate) info2->ultimate = false; info = info2; + + StringSource source(*sink.s); + dstStore->addToStore(*info, source, repair, checkSigs); + return; } -#endif if (info->ultimate) { auto info2 = make_ref(*info); @@ -613,6 +615,8 @@ void copyStorePath(ref srcStore, ref dstStore, act.progress(total, info->narSize); }); srcStore->narFromPath({storePath}, wrapperSink); + }, [&]() { + throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri()); }); dstStore->addToStore(*info, *source, repair, checkSigs); @@ -633,11 +637,12 @@ void copyPaths(ref srcStore, ref dstStore, const PathSet & storePa Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size())); std::atomic nrDone{0}; + std::atomic nrFailed{0}; std::atomic bytesExpected{0}; std::atomic nrRunning{0}; auto showProgress = [&]() { - act.progress(nrDone, missing.size(), nrRunning); + act.progress(nrDone, missing.size(), nrRunning, nrFailed); }; ThreadPool pool; @@ -666,7 +671,16 @@ void copyPaths(ref srcStore, ref dstStore, const PathSet & storePa if (!dstStore->isValidPath(storePath)) { MaintainCount mc(nrRunning); showProgress(); - copyStorePath(srcStore, dstStore, storePath, repair, checkSigs); + try { + copyStorePath(srcStore, dstStore, storePath, repair, checkSigs); + } catch (Error &e) { + nrFailed++; + if (!settings.keepGoing) + throw e; + logger->log(lvlError, format("could not copy %s: %s") % storePath % e.what()); + showProgress(); + return; + } } nrDone++; @@ -828,26 +842,50 @@ namespace nix { RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0; - -ref openStore(const std::string & uri_, - const Store::Params & extraParams) +/* Split URI into protocol+hierarchy part and its parameter set. 
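Illustrative aside: the old success/failure callback pair is replaced by a single Callback that receives a std::future, and the synchronous Store::queryPathInfo above bridges it through a std::promise. The same bridging pattern in isolation (waitFor is a made-up name):

    #include <future>

    // Generic sketch of the callback-to-future bridge used above.
    template<typename T, typename AsyncOp>
    T waitFor(AsyncOp asyncOp)
    {
        std::promise<T> promise;
        asyncOp([&](std::future<T> result) {
            try {
                promise.set_value(result.get());                 // success
            } catch (...) {
                promise.set_exception(std::current_exception()); // failure
            }
        });
        return promise.get_future().get();
    }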
*/ +std::pair splitUriAndParams(const std::string & uri_) { auto uri(uri_); - Store::Params params(extraParams); + Store::Params params; auto q = uri.find('?'); if (q != std::string::npos) { for (auto s : tokenizeString(uri.substr(q + 1), "&")) { auto e = s.find('='); - if (e != std::string::npos) - params[s.substr(0, e)] = s.substr(e + 1); + if (e != std::string::npos) { + auto value = s.substr(e + 1); + std::string decoded; + for (size_t i = 0; i < value.size(); ) { + if (value[i] == '%') { + if (i + 2 >= value.size()) + throw Error("invalid URI parameter '%s'", value); + try { + decoded += std::stoul(std::string(value, i + 1, 2), 0, 16); + i += 3; + } catch (...) { + throw Error("invalid URI parameter '%s'", value); + } + } else + decoded += value[i++]; + } + params[s.substr(0, e)] = decoded; + } } uri = uri_.substr(0, q); } + return {uri, params}; +} + +ref openStore(const std::string & uri_, + const Store::Params & extraParams) +{ + auto [uri, uriParams] = splitUriAndParams(uri_); + auto params = extraParams; + params.insert(uriParams.begin(), uriParams.end()); for (auto fun : *RegisterStoreImplementation::implementations) { auto store = fun(uri, params); if (store) { - store->handleUnknownSettings(); + store->warnUnknownSettings(); return ref(store); } } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index ea259f07e8a..ad0f8df11b8 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -22,6 +22,8 @@ MakeError(SubstError, Error) MakeError(BuildError, Error) /* denotes a permanent build failure */ MakeError(InvalidPath, Error) MakeError(Unsupported, Error) +MakeError(SubstituteGone, Error) +MakeError(SubstituterDisabled, Error) struct BasicDerivation; @@ -347,7 +349,8 @@ public: (i.e. you'll get /nix/store/ rather than /nix/store/-). Use queryPathInfo() to obtain the full store path. */ - virtual PathSet queryAllValidPaths() = 0; + virtual PathSet queryAllValidPaths() + { unsupported("queryAllValidPaths"); } /* Query information about a valid path. It is permitted to omit the name part of the store path. */ @@ -355,21 +358,19 @@ public: /* Asynchronous version of queryPathInfo(). */ void queryPathInfo(const Path & path, - std::function)> success, - std::function failure); + Callback> callback); protected: virtual void queryPathInfoUncached(const Path & path, - std::function)> success, - std::function failure) = 0; + Callback> callback) = 0; public: /* Queries the set of incoming FS references for a store path. The result is not cleared. */ - virtual void queryReferrers(const Path & path, - PathSet & referrers) = 0; + virtual void queryReferrers(const Path & path, PathSet & referrers) + { unsupported("queryReferrers"); } /* Return all currently valid derivations that have `path' as an output. (Note that the result of `queryDeriver()' is the @@ -378,10 +379,12 @@ public: virtual PathSet queryValidDerivers(const Path & path) { return {}; }; /* Query the outputs of the derivation denoted by `path'. */ - virtual PathSet queryDerivationOutputs(const Path & path) = 0; + virtual PathSet queryDerivationOutputs(const Path & path) + { unsupported("queryDerivationOutputs"); } /* Query the output names of the derivation denoted by `path'. */ - virtual StringSet queryDerivationOutputNames(const Path & path) = 0; + virtual StringSet queryDerivationOutputNames(const Path & path) + { unsupported("queryDerivationOutputNames"); } /* Query the full store path given the hash part of a valid store path, or "" if the path doesn't exist. 
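Illustrative aside: splitUriAndParams above percent-decodes parameter values, so a query such as ?secret-key=/path/with%20space round-trips correctly. The decoding loop as a standalone sketch:

    #include <stdexcept>
    #include <string>

    // '%xx' becomes the corresponding byte; mirrors the loop above.
    std::string percentDecode(const std::string & value)
    {
        std::string decoded;
        for (size_t i = 0; i < value.size(); ) {
            if (value[i] == '%') {
                if (i + 2 >= value.size())
                    throw std::runtime_error("invalid URI parameter '" + value + "'");
                decoded += (char) std::stoul(value.substr(i + 1, 2), 0, 16);
                i += 3;
            } else
                decoded += value[i++];
        }
        return decoded;   // percentDecode("a%20b") == "a b"
    }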
*/ @@ -447,14 +450,16 @@ public: /* Add a store path as a temporary root of the garbage collector. The root disappears as soon as we exit. */ - virtual void addTempRoot(const Path & path) = 0; + virtual void addTempRoot(const Path & path) + { unsupported("addTempRoot"); } /* Add an indirect root, which is merely a symlink to `path' from /nix/var/nix/gcroots/auto/. `path' is supposed to be a symlink to a store path. The garbage collector will automatically remove the indirect root when it finds that `path' has disappeared. */ - virtual void addIndirectRoot(const Path & path) = 0; + virtual void addIndirectRoot(const Path & path) + { unsupported("addIndirectRoot"); } /* Acquire the global GC lock, then immediately release it. This function must be called after registering a new permanent root, @@ -479,10 +484,12 @@ public: /* Find the roots of the garbage collector. Each root is a pair (link, storepath) where `link' is the path of the symlink outside of the Nix store that point to `storePath'. */ - virtual Roots findRoots() = 0; + virtual Roots findRoots() + { unsupported("findRoots"); } /* Perform a garbage collection. */ - virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0; + virtual void collectGarbage(const GCOptions & options, GCResults & results) + { unsupported("collectGarbage"); } /* Return a string representing information about the path that can be loaded into the database using `nix-store --load-db' or @@ -513,11 +520,13 @@ public: virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair) { return false; }; /* Return an object to access files in the Nix store. */ - virtual ref getFSAccessor() = 0; + virtual ref getFSAccessor() + { unsupported("getFSAccessor"); } /* Add signatures to the specified store path. The signatures are not verified. */ - virtual void addSignatures(const Path & storePath, const StringSet & sigs) = 0; + virtual void addSignatures(const Path & storePath, const StringSet & sigs) + { unsupported("addSignatures"); } /* Utility functions. */ @@ -599,6 +608,12 @@ public: a notion of connection. Otherwise this is a no-op. */ virtual void connect() { }; + /* Get the protocol version of this store or it's connection. */ + virtual unsigned int getProtocol() + { + return 0; + }; + /* Get the priority of the store, used to order substituters. In particular, binary caches can specify a priority field in their "nix-cache-info" file. Lower value means higher priority. */ @@ -614,9 +629,9 @@ protected: Stats stats; /* Unsupported methods. */ - [[noreturn]] void unsupported() + [[noreturn]] void unsupported(const std::string & op) { - throw Unsupported("requested operation is not supported by store '%s'", getUri()); + throw Unsupported("operation '%s' is not supported by store '%s'", op, getUri()); } }; @@ -783,4 +798,8 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, for paths created by makeFixedOutputPath() / addToStore(). */ std::string makeFixedOutputCA(bool recursive, const Hash & hash); + +/* Split URI into protocol+hierarchy part and its parameter set. 
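Illustrative aside: several Store methods above change from pure virtual to a default body that throws, so a store only overrides what it supports while callers still get an error naming the missing operation. The pattern in isolation (class and member names here are generic):

    #include <stdexcept>
    #include <string>

    struct AbstractStore
    {
        virtual std::string getUri() = 0;

        // Default: report the unsupported operation instead of forcing
        // every subclass to implement it.
        virtual void collectGarbage()
        { unsupported("collectGarbage"); }

    protected:
        [[noreturn]] void unsupported(const std::string & op)
        {
            throw std::runtime_error(
                "operation '" + op + "' is not supported by store '" + getUri() + "'");
        }
    };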
*/ +std::pair splitUriAndParams(const std::string & uri); + } diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 996e1d25355..5ebdfaf134d 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -6,7 +6,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION 0x114 +#define PROTOCOL_VERSION 0x115 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 154e2d20430..3aa12027097 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -13,17 +13,25 @@ #include "archive.hh" #include "util.hh" - +#include "config.hh" namespace nix { +struct ArchiveSettings : Config +{ + Setting useCaseHack{this, + #if __APPLE__ + true, + #else + false, + #endif + "use-case-hack", + "Whether to enable a Darwin-specific hack for dealing with file name collisions."}; +}; -bool useCaseHack = -#if __APPLE__ - true; -#else - false; -#endif +static ArchiveSettings archiveSettings; + +static GlobalConfig::Register r1(&archiveSettings); const std::string narVersionMagic1 = "nix-archive-1"; @@ -78,7 +86,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter) the case hack applied by restorePath(). */ std::map unhacked; for (auto & i : readDirectory(path)) - if (useCaseHack) { + if (archiveSettings.useCaseHack) { string name(i.name); size_t pos = i.name.find(caseHackSuffix); if (pos != string::npos) { @@ -243,7 +251,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path) if (name <= prevName) throw Error("NAR directory is not sorted"); prevName = name; - if (useCaseHack) { + if (archiveSettings.useCaseHack) { auto i = names.find(name); if (i != names.end()) { debug(format("case collision between '%1%' and '%2%'") % i->first % name); @@ -275,7 +283,7 @@ void parseDump(ParseSink & sink, Source & source) { string version; try { - version = readString(source); + version = readString(source, narVersionMagic1.size()); } catch (SerialisationError & e) { /* This generally means the integer at the start couldn't be decoded. Ignore and throw the exception below. */ @@ -323,7 +331,7 @@ struct RestoreSink : ParseSink filesystem doesn't support preallocation (e.g. on OpenSolaris). Since preallocation is just an optimisation, ignore it. */ - if (errno && errno != EINVAL) + if (errno && errno != EINVAL && errno != EOPNOTSUPP && errno != ENOSYS) throw SysError(format("preallocating file of %1% bytes") % len); } #endif diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index 7a0e688e420..25be426c1a4 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -78,10 +78,6 @@ void restorePath(const Path & path, Source & source); void copyNAR(Source & source, Sink & sink); -// FIXME: global variables are bad m'kay. -extern bool useCaseHack; - - extern const std::string narVersionMagic1; diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 81cb5e98c76..0dd84e32034 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -8,246 +8,265 @@ #include #include -#if HAVE_BROTLI #include #include -#endif // HAVE_BROTLI #include namespace nix { -static const size_t bufSize = 32 * 1024; - -static void decompressNone(Source & source, Sink & sink) +// Don't feed brotli too much at once. 
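Illustrative aside: the comment above introduces ChunkedCompressionSink, defined immediately below, which caps how many bytes reach the underlying codec per call. The chunking loop as a standalone sketch:

    #include <algorithm>
    #include <cstddef>
    #include <functional>

    // Splits an arbitrarily large write into bounded pieces.
    void writeChunked(const unsigned char * data, size_t len, size_t chunkSize,
        const std::function<void(const unsigned char *, size_t)> & writeOne)
    {
        while (len) {
            size_t n = std::min(chunkSize, len);
            writeOne(data, n);   // never sees more than chunkSize bytes at once
            data += n;
            len -= n;
        }
    }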
+struct ChunkedCompressionSink : CompressionSink { - std::vector buf(bufSize); - while (true) { - size_t n; - try { - n = source.read(buf.data(), buf.size()); - } catch (EndOfFile &) { - break; + uint8_t outbuf[32 * 1024]; + + void write(const unsigned char * data, size_t len) override + { + const size_t CHUNK_SIZE = sizeof(outbuf) << 2; + while (len) { + size_t n = std::min(CHUNK_SIZE, len); + writeInternal(data, n); + data += n; + len -= n; } - sink(buf.data(), n); } -} -static void decompressXZ(Source & source, Sink & sink) + virtual void writeInternal(const unsigned char * data, size_t len) = 0; +}; + +struct NoneSink : CompressionSink { - lzma_stream strm(LZMA_STREAM_INIT); - - lzma_ret ret = lzma_stream_decoder( - &strm, UINT64_MAX, LZMA_CONCATENATED); - if (ret != LZMA_OK) - throw CompressionError("unable to initialise lzma decoder"); - - Finally free([&]() { lzma_end(&strm); }); - - lzma_action action = LZMA_RUN; - std::vector inbuf(bufSize), outbuf(bufSize); - strm.next_in = nullptr; - strm.avail_in = 0; - strm.next_out = outbuf.data(); - strm.avail_out = outbuf.size(); - bool eof = false; - - while (true) { - checkInterrupt(); - - if (strm.avail_in == 0 && !eof) { - strm.next_in = inbuf.data(); - try { - strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size()); - } catch (EndOfFile &) { - eof = true; - } - } + Sink & nextSink; + NoneSink(Sink & nextSink) : nextSink(nextSink) { } + void finish() override { flush(); } + void write(const unsigned char * data, size_t len) override { nextSink(data, len); } +}; - if (strm.avail_in == 0) - action = LZMA_FINISH; +struct XzDecompressionSink : CompressionSink +{ + Sink & nextSink; + uint8_t outbuf[BUFSIZ]; + lzma_stream strm = LZMA_STREAM_INIT; + bool finished = false; - lzma_ret ret = lzma_code(&strm, action); + XzDecompressionSink(Sink & nextSink) : nextSink(nextSink) + { + lzma_ret ret = lzma_stream_decoder( + &strm, UINT64_MAX, LZMA_CONCATENATED); + if (ret != LZMA_OK) + throw CompressionError("unable to initialise lzma decoder"); - if (strm.avail_out < outbuf.size()) { - sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out); - strm.next_out = outbuf.data(); - strm.avail_out = outbuf.size(); - } + strm.next_out = outbuf; + strm.avail_out = sizeof(outbuf); + } - if (ret == LZMA_STREAM_END) return; + ~XzDecompressionSink() + { + lzma_end(&strm); + } - if (ret != LZMA_OK) - throw CompressionError("error %d while decompressing xz file", ret); + void finish() override + { + CompressionSink::flush(); + write(nullptr, 0); } -} -static void decompressBzip2(Source & source, Sink & sink) -{ - bz_stream strm; - memset(&strm, 0, sizeof(strm)); - - int ret = BZ2_bzDecompressInit(&strm, 0, 0); - if (ret != BZ_OK) - throw CompressionError("unable to initialise bzip2 decoder"); - - Finally free([&]() { BZ2_bzDecompressEnd(&strm); }); - - std::vector inbuf(bufSize), outbuf(bufSize); - strm.next_in = nullptr; - strm.avail_in = 0; - strm.next_out = outbuf.data(); - strm.avail_out = outbuf.size(); - bool eof = false; - - while (true) { - checkInterrupt(); - - if (strm.avail_in == 0 && !eof) { - strm.next_in = inbuf.data(); - try { - strm.avail_in = source.read((unsigned char *) strm.next_in, inbuf.size()); - } catch (EndOfFile &) { - eof = true; - } - } + void write(const unsigned char * data, size_t len) override + { + strm.next_in = data; + strm.avail_in = len; + + while (!finished && (!data || strm.avail_in)) { + checkInterrupt(); - int ret = BZ2_bzDecompress(&strm); + lzma_ret ret = lzma_code(&strm, data ? 
LZMA_RUN : LZMA_FINISH); + if (ret != LZMA_OK && ret != LZMA_STREAM_END) + throw CompressionError("error %d while decompressing xz file", ret); - if (strm.avail_in == 0 && strm.avail_out == outbuf.size() && eof) - throw CompressionError("bzip2 data ends prematurely"); + finished = ret == LZMA_STREAM_END; - if (strm.avail_out < outbuf.size()) { - sink((unsigned char *) outbuf.data(), outbuf.size() - strm.avail_out); - strm.next_out = outbuf.data(); - strm.avail_out = outbuf.size(); + if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - strm.avail_out); + strm.next_out = outbuf; + strm.avail_out = sizeof(outbuf); + } } + } +}; - if (ret == BZ_STREAM_END) return; +struct BzipDecompressionSink : ChunkedCompressionSink +{ + Sink & nextSink; + bz_stream strm; + bool finished = false; + BzipDecompressionSink(Sink & nextSink) : nextSink(nextSink) + { + memset(&strm, 0, sizeof(strm)); + int ret = BZ2_bzDecompressInit(&strm, 0, 0); if (ret != BZ_OK) - throw CompressionError("error while decompressing bzip2 file"); + throw CompressionError("unable to initialise bzip2 decoder"); + + strm.next_out = (char *) outbuf; + strm.avail_out = sizeof(outbuf); } -} -static void decompressBrotli(Source & source, Sink & sink) -{ -#if !HAVE_BROTLI - RunOptions options(BROTLI, {"-d"}); - options.standardIn = &source; - options.standardOut = &sink; - runProgram2(options); -#else - auto *s = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); - if (!s) - throw CompressionError("unable to initialize brotli decoder"); - - Finally free([s]() { BrotliDecoderDestroyInstance(s); }); - - std::vector inbuf(bufSize), outbuf(bufSize); - const uint8_t * next_in = nullptr; - size_t avail_in = 0; - bool eof = false; - - while (true) { - checkInterrupt(); - - if (avail_in == 0 && !eof) { - next_in = inbuf.data(); - try { - avail_in = source.read((unsigned char *) next_in, inbuf.size()); - } catch (EndOfFile &) { - eof = true; + ~BzipDecompressionSink() + { + BZ2_bzDecompressEnd(&strm); + } + + void finish() override + { + flush(); + write(nullptr, 0); + } + + void writeInternal(const unsigned char * data, size_t len) override + { + assert(len <= std::numeric_limits::max()); + + strm.next_in = (char *) data; + strm.avail_in = len; + + while (strm.avail_in) { + checkInterrupt(); + + int ret = BZ2_bzDecompress(&strm); + if (ret != BZ_OK && ret != BZ_STREAM_END) + throw CompressionError("error while decompressing bzip2 file"); + + finished = ret == BZ_STREAM_END; + + if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - strm.avail_out); + strm.next_out = (char *) outbuf; + strm.avail_out = sizeof(outbuf); } } + } +}; - uint8_t * next_out = outbuf.data(); - size_t avail_out = outbuf.size(); - - auto ret = BrotliDecoderDecompressStream(s, - &avail_in, &next_in, - &avail_out, &next_out, - nullptr); - - switch (ret) { - case BROTLI_DECODER_RESULT_ERROR: - throw CompressionError("error while decompressing brotli file"); - case BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: - if (eof) - throw CompressionError("incomplete or corrupt brotli file"); - break; - case BROTLI_DECODER_RESULT_SUCCESS: - if (avail_in != 0) - throw CompressionError("unexpected input after brotli decompression"); - break; - case BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: - // I'm not sure if this can happen, but abort if this happens with empty buffer - if (avail_out == outbuf.size()) - throw CompressionError("brotli decompression requires larger buffer"); - break; - } +struct 
BrotliDecompressionSink : ChunkedCompressionSink +{ + Sink & nextSink; + BrotliDecoderState * state; + bool finished = false; - // Always ensure we have full buffer for next invocation - if (avail_out < outbuf.size()) - sink((unsigned char *) outbuf.data(), outbuf.size() - avail_out); + BrotliDecompressionSink(Sink & nextSink) : nextSink(nextSink) + { + state = BrotliDecoderCreateInstance(nullptr, nullptr, nullptr); + if (!state) + throw CompressionError("unable to initialize brotli decoder"); + } - if (ret == BROTLI_DECODER_RESULT_SUCCESS) return; + ~BrotliDecompressionSink() + { + BrotliDecoderDestroyInstance(state); } -#endif // HAVE_BROTLI -} + + void finish() override + { + flush(); + writeInternal(nullptr, 0); + } + + void writeInternal(const unsigned char * data, size_t len) override + { + const uint8_t * next_in = data; + size_t avail_in = len; + uint8_t * next_out = outbuf; + size_t avail_out = sizeof(outbuf); + + while (!finished && (!data || avail_in)) { + checkInterrupt(); + + if (!BrotliDecoderDecompressStream(state, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while decompressing brotli file"); + + if (avail_out < sizeof(outbuf) || avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - avail_out); + next_out = outbuf; + avail_out = sizeof(outbuf); + } + + finished = BrotliDecoderIsFinished(state); + } + } +}; ref decompress(const std::string & method, const std::string & in) { - StringSource source(in); - StringSink sink; - decompress(method, source, sink); - return sink.s; + StringSink ssink; + auto sink = makeDecompressionSink(method, ssink); + (*sink)(in); + sink->finish(); + return ssink.s; } -void decompress(const std::string & method, Source & source, Sink & sink) +ref makeDecompressionSink(const std::string & method, Sink & nextSink) { - if (method == "none") - return decompressNone(source, sink); + if (method == "none" || method == "") + return make_ref(nextSink); else if (method == "xz") - return decompressXZ(source, sink); + return make_ref(nextSink); else if (method == "bzip2") - return decompressBzip2(source, sink); + return make_ref(nextSink); else if (method == "br") - return decompressBrotli(source, sink); + return make_ref(nextSink); else throw UnknownCompressionMethod("unknown compression method '%s'", method); } -struct NoneSink : CompressionSink -{ - Sink & nextSink; - NoneSink(Sink & nextSink) : nextSink(nextSink) { } - void finish() override { flush(); } - void write(const unsigned char * data, size_t len) override { nextSink(data, len); } -}; - -struct XzSink : CompressionSink +struct XzCompressionSink : CompressionSink { Sink & nextSink; uint8_t outbuf[BUFSIZ]; lzma_stream strm = LZMA_STREAM_INIT; bool finished = false; - template - XzSink(Sink & nextSink, F&& initEncoder) : nextSink(nextSink) { - lzma_ret ret = initEncoder(); + XzCompressionSink(Sink & nextSink, bool parallel) : nextSink(nextSink) + { + lzma_ret ret; + bool done = false; + + if (parallel) { +#ifdef HAVE_LZMA_MT + lzma_mt mt_options = {}; + mt_options.flags = 0; + mt_options.timeout = 300; // Using the same setting as the xz cmd line + mt_options.preset = LZMA_PRESET_DEFAULT; + mt_options.filters = NULL; + mt_options.check = LZMA_CHECK_CRC64; + mt_options.threads = lzma_cputhreads(); + mt_options.block_size = 0; + if (mt_options.threads == 0) + mt_options.threads = 1; + // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the + // number of threads. 
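Illustrative aside on the decompression sinks defined earlier in this file: makeDecompressionSink lets callers stream compressed data in pieces instead of buffering it whole, as the new decompress() above demonstrates. A usage sketch (the chunk container is hypothetical):

    #include <string>
    #include <vector>

    // Sketch: stream-decompress xz data that arrives in pieces.
    std::string decompressChunks(const std::vector<std::string> & chunks)
    {
        StringSink out;
        auto sink = makeDecompressionSink("xz", out);
        for (auto & chunk : chunks)
            (*sink)((const unsigned char *) chunk.data(), chunk.size());
        sink->finish();
        return *out.s;
    }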
+ ret = lzma_stream_encoder_mt(&strm, &mt_options); + done = true; +#else + printMsg(lvlError, "warning: parallel XZ compression requested but not supported, falling back to single-threaded compression"); +#endif + } + + if (!done) + ret = lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); + if (ret != LZMA_OK) throw CompressionError("unable to initialise lzma encoder"); + // FIXME: apply the x86 BCJ filter? strm.next_out = outbuf; strm.avail_out = sizeof(outbuf); } - XzSink(Sink & nextSink) : XzSink(nextSink, [this]() { - return lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64); - }) {} - ~XzSink() + ~XzCompressionSink() { lzma_end(&strm); } @@ -255,43 +274,25 @@ struct XzSink : CompressionSink void finish() override { CompressionSink::flush(); - - assert(!finished); - finished = true; - - while (true) { - checkInterrupt(); - - lzma_ret ret = lzma_code(&strm, LZMA_FINISH); - if (ret != LZMA_OK && ret != LZMA_STREAM_END) - throw CompressionError("error while flushing xz file"); - - if (strm.avail_out == 0 || ret == LZMA_STREAM_END) { - nextSink(outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - - if (ret == LZMA_STREAM_END) break; - } + write(nullptr, 0); } void write(const unsigned char * data, size_t len) override { - assert(!finished); - strm.next_in = data; strm.avail_in = len; - while (strm.avail_in) { + while (!finished && (!data || strm.avail_in)) { checkInterrupt(); - lzma_ret ret = lzma_code(&strm, LZMA_RUN); - if (ret != LZMA_OK) - throw CompressionError("error while compressing xz file"); + lzma_ret ret = lzma_code(&strm, data ? LZMA_RUN : LZMA_FINISH); + if (ret != LZMA_OK && ret != LZMA_STREAM_END) + throw CompressionError("error %d while compressing xz file", ret); + + finished = ret == LZMA_STREAM_END; - if (strm.avail_out == 0) { - nextSink(outbuf, sizeof(outbuf)); + if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - strm.avail_out); strm.next_out = outbuf; strm.avail_out = sizeof(outbuf); } @@ -299,46 +300,24 @@ struct XzSink : CompressionSink } }; -#ifdef HAVE_LZMA_MT -struct ParallelXzSink : public XzSink -{ - ParallelXzSink(Sink &nextSink) : XzSink(nextSink, [this]() { - lzma_mt mt_options = {}; - mt_options.flags = 0; - mt_options.timeout = 300; // Using the same setting as the xz cmd line - mt_options.preset = LZMA_PRESET_DEFAULT; - mt_options.filters = NULL; - mt_options.check = LZMA_CHECK_CRC64; - mt_options.threads = lzma_cputhreads(); - mt_options.block_size = 0; - if (mt_options.threads == 0) - mt_options.threads = 1; - // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the - // number of threads. 
- return lzma_stream_encoder_mt(&strm, &mt_options); - }) {} -}; -#endif - -struct BzipSink : CompressionSink +struct BzipCompressionSink : ChunkedCompressionSink { Sink & nextSink; - char outbuf[BUFSIZ]; bz_stream strm; bool finished = false; - BzipSink(Sink & nextSink) : nextSink(nextSink) + BzipCompressionSink(Sink & nextSink) : nextSink(nextSink) { memset(&strm, 0, sizeof(strm)); int ret = BZ2_bzCompressInit(&strm, 9, 0, 30); if (ret != BZ_OK) throw CompressionError("unable to initialise bzip2 encoder"); - strm.next_out = outbuf; + strm.next_out = (char *) outbuf; strm.avail_out = sizeof(outbuf); } - ~BzipSink() + ~BzipCompressionSink() { BZ2_bzCompressEnd(&strm); } @@ -346,101 +325,49 @@ struct BzipSink : CompressionSink void finish() override { flush(); - - assert(!finished); - finished = true; - - while (true) { - checkInterrupt(); - - int ret = BZ2_bzCompress(&strm, BZ_FINISH); - if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END) - throw CompressionError("error while flushing bzip2 file"); - - if (strm.avail_out == 0 || ret == BZ_STREAM_END) { - nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out); - strm.next_out = outbuf; - strm.avail_out = sizeof(outbuf); - } - - if (ret == BZ_STREAM_END) break; - } + writeInternal(nullptr, 0); } - void write(const unsigned char * data, size_t len) override + void writeInternal(const unsigned char * data, size_t len) override { - assert(!finished); + assert(len <= std::numeric_limits::max()); strm.next_in = (char *) data; strm.avail_in = len; - while (strm.avail_in) { + while (!finished && (!data || strm.avail_in)) { checkInterrupt(); - int ret = BZ2_bzCompress(&strm, BZ_RUN); - if (ret != BZ_OK) - CompressionError("error while compressing bzip2 file"); + int ret = BZ2_bzCompress(&strm, data ? 
BZ_RUN : BZ_FINISH); + if (ret != BZ_RUN_OK && ret != BZ_FINISH_OK && ret != BZ_STREAM_END) + throw CompressionError("error %d while compressing bzip2 file", ret); - if (strm.avail_out == 0) { - nextSink((unsigned char *) outbuf, sizeof(outbuf)); - strm.next_out = outbuf; + finished = ret == BZ_STREAM_END; + + if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) { + nextSink(outbuf, sizeof(outbuf) - strm.avail_out); + strm.next_out = (char *) outbuf; strm.avail_out = sizeof(outbuf); } } } }; -struct LambdaCompressionSink : CompressionSink -{ - Sink & nextSink; - std::string data; - using CompressFnTy = std::function; - CompressFnTy compressFn; - LambdaCompressionSink(Sink& nextSink, CompressFnTy compressFn) - : nextSink(nextSink) - , compressFn(std::move(compressFn)) - { - }; - - void finish() override - { - flush(); - nextSink(compressFn(data)); - } - - void write(const unsigned char * data, size_t len) override - { - checkInterrupt(); - this->data.append((const char *) data, len); - } -}; - -struct BrotliCmdSink : LambdaCompressionSink -{ - BrotliCmdSink(Sink& nextSink) - : LambdaCompressionSink(nextSink, [](const std::string& data) { - return runProgram(BROTLI, true, {}, data); - }) - { - } -}; - -#if HAVE_BROTLI -struct BrotliSink : CompressionSink +struct BrotliCompressionSink : ChunkedCompressionSink { Sink & nextSink; uint8_t outbuf[BUFSIZ]; BrotliEncoderState *state; bool finished = false; - BrotliSink(Sink & nextSink) : nextSink(nextSink) + BrotliCompressionSink(Sink & nextSink) : nextSink(nextSink) { state = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); if (!state) throw CompressionError("unable to initialise brotli encoder"); } - ~BrotliSink() + ~BrotliCompressionSink() { BrotliEncoderDestroyInstance(state); } @@ -448,96 +375,47 @@ struct BrotliSink : CompressionSink void finish() override { flush(); - assert(!finished); - - const uint8_t *next_in = nullptr; - size_t avail_in = 0; - uint8_t *next_out = outbuf; - size_t avail_out = sizeof(outbuf); - while (!finished) { - checkInterrupt(); - - if (!BrotliEncoderCompressStream(state, - BROTLI_OPERATION_FINISH, - &avail_in, &next_in, - &avail_out, &next_out, - nullptr)) - throw CompressionError("error while finishing brotli file"); - - finished = BrotliEncoderIsFinished(state); - if (avail_out == 0 || finished) { - nextSink(outbuf, sizeof(outbuf) - avail_out); - next_out = outbuf; - avail_out = sizeof(outbuf); - } - } + writeInternal(nullptr, 0); } - void write(const unsigned char * data, size_t len) override - { - assert(!finished); - - // Don't feed brotli too much at once - const size_t CHUNK_SIZE = sizeof(outbuf) << 2; - while (len) { - size_t n = std::min(CHUNK_SIZE, len); - writeInternal(data, n); - data += n; - len -= n; - } - } - private: - void writeInternal(const unsigned char * data, size_t len) + void writeInternal(const unsigned char * data, size_t len) override { - assert(!finished); - - const uint8_t *next_in = data; + const uint8_t * next_in = data; size_t avail_in = len; - uint8_t *next_out = outbuf; + uint8_t * next_out = outbuf; size_t avail_out = sizeof(outbuf); - while (avail_in > 0) { + while (!finished && (!data || avail_in)) { checkInterrupt(); if (!BrotliEncoderCompressStream(state, - BROTLI_OPERATION_PROCESS, - &avail_in, &next_in, - &avail_out, &next_out, - nullptr)) - throw CompressionError("error while compressing brotli file"); + data ? 
BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH, + &avail_in, &next_in, + &avail_out, &next_out, + nullptr)) + throw CompressionError("error while compressing brotli compression"); if (avail_out < sizeof(outbuf) || avail_in == 0) { nextSink(outbuf, sizeof(outbuf) - avail_out); next_out = outbuf; avail_out = sizeof(outbuf); } + + finished = BrotliEncoderIsFinished(state); } } }; -#endif // HAVE_BROTLI ref makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel) { - if (parallel) { -#ifdef HAVE_LZMA_MT - if (method == "xz") - return make_ref(nextSink); -#endif - printMsg(lvlError, format("Warning: parallel compression requested but not supported for method '%1%', falling back to single-threaded compression") % method); - } - if (method == "none") return make_ref(nextSink); else if (method == "xz") - return make_ref(nextSink); + return make_ref(nextSink, parallel); else if (method == "bzip2") - return make_ref(nextSink); + return make_ref(nextSink); else if (method == "br") -#if HAVE_BROTLI - return make_ref(nextSink); -#else - return make_ref(nextSink); -#endif + return make_ref(nextSink); else throw UnknownCompressionMethod(format("unknown compression method '%s'") % method); } diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index f7a3e3fbd32..dd666a4e19f 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -8,17 +8,17 @@ namespace nix { -ref decompress(const std::string & method, const std::string & in); - -void decompress(const std::string & method, Source & source, Sink & sink); - -ref compress(const std::string & method, const std::string & in, const bool parallel = false); - struct CompressionSink : BufferedSink { virtual void finish() = 0; }; +ref decompress(const std::string & method, const std::string & in); + +ref makeDecompressionSink(const std::string & method, Sink & nextSink); + +ref compress(const std::string & method, const std::string & in, const bool parallel = false); + ref makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false); MakeError(UnknownCompressionMethod, Error); diff --git a/src/libutil/config.cc b/src/libutil/config.cc index ce6858f0d65..9023cb1bb6d 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -4,15 +4,13 @@ namespace nix { -void Config::set(const std::string & name, const std::string & value) +bool Config::set(const std::string & name, const std::string & value) { auto i = _settings.find(name); - if (i == _settings.end()) { - extras.emplace(name, value); - } else { - i->second.setting->set(value); - i->second.setting->overriden = true; - } + if (i == _settings.end()) return false; + i->second.setting->set(value); + i->second.setting->overriden = true; + return true; } void Config::addSetting(AbstractSetting * setting) @@ -23,46 +21,51 @@ void Config::addSetting(AbstractSetting * setting) bool set = false; - auto i = extras.find(setting->name); - if (i != extras.end()) { + auto i = unknownSettings.find(setting->name); + if (i != unknownSettings.end()) { setting->set(i->second); setting->overriden = true; - extras.erase(i); + unknownSettings.erase(i); set = true; } for (auto & alias : setting->aliases) { - auto i = extras.find(alias); - if (i != extras.end()) { + auto i = unknownSettings.find(alias); + if (i != unknownSettings.end()) { if (set) warn("setting '%s' is set, but it's an alias of '%s' which is also set", alias, setting->name); else { setting->set(i->second); setting->overriden = true; - extras.erase(i); + 
unknownSettings.erase(i); set = true; } } } } -void Config::handleUnknownSettings() +void AbstractConfig::warnUnknownSettings() { - for (auto & s : extras) + for (auto & s : unknownSettings) warn("unknown setting '%s'", s.first); } -StringMap Config::getSettings(bool overridenOnly) +void AbstractConfig::reapplyUnknownSettings() +{ + auto unknownSettings2 = std::move(unknownSettings); + for (auto & s : unknownSettings2) + set(s.first, s.second); +} + +void Config::getSettings(std::map & res, bool overridenOnly) { - StringMap res; for (auto & opt : _settings) if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden)) - res.emplace(opt.first, opt.second.setting->to_string()); - return res; + res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description}); } -void Config::applyConfigFile(const Path & path) +void AbstractConfig::applyConfigFile(const Path & path) { try { string contents = readFile(path); @@ -287,4 +290,49 @@ void PathSetting::set(const std::string & str) value = canonPath(str); } +bool GlobalConfig::set(const std::string & name, const std::string & value) +{ + for (auto & config : *configRegistrations) + if (config->set(name, value)) return true; + + unknownSettings.emplace(name, value); + + return false; +} + +void GlobalConfig::getSettings(std::map & res, bool overridenOnly) +{ + for (auto & config : *configRegistrations) + config->getSettings(res, overridenOnly); +} + +void GlobalConfig::resetOverriden() +{ + for (auto & config : *configRegistrations) + config->resetOverriden(); +} + +void GlobalConfig::toJSON(JSONObject & out) +{ + for (auto & config : *configRegistrations) + config->toJSON(out); +} + +void GlobalConfig::convertToArgs(Args & args, const std::string & category) +{ + for (auto & config : *configRegistrations) + config->convertToArgs(args, category); +} + +GlobalConfig globalConfig; + +GlobalConfig::ConfigRegistrations * GlobalConfig::configRegistrations; + +GlobalConfig::Register::Register(Config * config) +{ + if (!configRegistrations) + configRegistrations = new ConfigRegistrations; + configRegistrations->emplace_back(config); +} + } diff --git a/src/libutil/config.hh b/src/libutil/config.hh index d2e7faf1743..d86c65ff033 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -12,6 +12,40 @@ class AbstractSetting; class JSONPlaceholder; class JSONObject; +class AbstractConfig +{ +protected: + StringMap unknownSettings; + + AbstractConfig(const StringMap & initials = {}) + : unknownSettings(initials) + { } + +public: + + virtual bool set(const std::string & name, const std::string & value) = 0; + + struct SettingInfo + { + std::string value; + std::string description; + }; + + virtual void getSettings(std::map & res, bool overridenOnly = false) = 0; + + void applyConfigFile(const Path & path); + + virtual void resetOverriden() = 0; + + virtual void toJSON(JSONObject & out) = 0; + + virtual void convertToArgs(Args & args, const std::string & category) = 0; + + void warnUnknownSettings(); + + void reapplyUnknownSettings(); +}; + /* A class to simplify providing configuration settings. 
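Together with the GlobalConfig registry implemented above, the new AbstractConfig base lets each component keep its own Config object and register it once, with unclaimed names accumulating in unknownSettings. A rough sketch of the intended usage pattern, assuming the Setting<T> constructor keeps its existing (owner, default, name, description) shape; MySettings and "example-option" are made-up names for illustration:

    #include "config.hh"

    using namespace nix;

    struct MySettings : Config
    {
        Setting<std::string> exampleOption{this, "default", "example-option",
            "An illustrative setting owned by this component."};
    };

    static MySettings mySettings;

    // Registration makes globalConfig.set()/getSettings() dispatch to this instance.
    static GlobalConfig::Register reg(&mySettings);

    void afterParsingNixConf()
    {
        globalConfig.set("example-option", "overridden");  // true once some registered Config claims it
        globalConfig.warnUnknownSettings();                 // warns about names nobody claimed
    }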
The typical use is to inherit Config and add Setting members: @@ -27,7 +61,7 @@ class JSONObject; }; */ -class Config +class Config : public AbstractConfig { friend class AbstractSetting; @@ -48,31 +82,23 @@ private: Settings _settings; - StringMap extras; - public: - Config(const StringMap & initials) - : extras(initials) + Config(const StringMap & initials = {}) + : AbstractConfig(initials) { } - void set(const std::string & name, const std::string & value); + bool set(const std::string & name, const std::string & value) override; void addSetting(AbstractSetting * setting); - void handleUnknownSettings(); - - StringMap getSettings(bool overridenOnly = false); + void getSettings(std::map & res, bool overridenOnly = false) override; - const Settings & _getSettings() { return _settings; } - - void applyConfigFile(const Path & path); + void resetOverriden() override; - void resetOverriden(); + void toJSON(JSONObject & out) override; - void toJSON(JSONObject & out); - - void convertToArgs(Args & args, const std::string & category); + void convertToArgs(Args & args, const std::string & category) override; }; class AbstractSetting @@ -209,4 +235,27 @@ public: void operator =(const Path & v) { this->assign(v); } }; +struct GlobalConfig : public AbstractConfig +{ + typedef std::vector ConfigRegistrations; + static ConfigRegistrations * configRegistrations; + + bool set(const std::string & name, const std::string & value) override; + + void getSettings(std::map & res, bool overridenOnly = false) override; + + void resetOverriden() override; + + void toJSON(JSONObject & out) override; + + void convertToArgs(Args & args, const std::string & category) override; + + struct Register + { + Register(Config * config); + }; +}; + +extern GlobalConfig globalConfig; + } diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index a01d651e1ef..1c14ebb187c 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -82,7 +82,7 @@ static string printHash32(const Hash & hash) string s; s.reserve(len); - for (int n = len - 1; n >= 0; n--) { + for (int n = (int) len - 1; n >= 0; n--) { unsigned int b = n * 5; unsigned int i = b / 8; unsigned int j = b % 8; @@ -105,9 +105,9 @@ string printHash16or32(const Hash & hash) std::string Hash::to_string(Base base, bool includeType) const { std::string s; - if (includeType) { + if (base == SRI || includeType) { s += printHashType(type); - s += ':'; + s += base == SRI ? 
'-' : ':'; } switch (base) { case Base16: @@ -117,6 +117,7 @@ std::string Hash::to_string(Base base, bool includeType) const s += printHash32(*this); break; case Base64: + case SRI: s += base64Encode(std::string((const char *) hash, hashSize)); break; } @@ -127,28 +128,33 @@ std::string Hash::to_string(Base base, bool includeType) const Hash::Hash(const std::string & s, HashType type) : type(type) { - auto colon = s.find(':'); - size_t pos = 0; - - if (colon == string::npos) { - if (type == htUnknown) + bool isSRI = false; + + auto sep = s.find(':'); + if (sep == string::npos) { + sep = s.find('-'); + if (sep != string::npos) { + isSRI = true; + } else if (type == htUnknown) throw BadHash("hash '%s' does not include a type", s); - } else { - string hts = string(s, 0, colon); + } + + if (sep != string::npos) { + string hts = string(s, 0, sep); this->type = parseHashType(hts); if (this->type == htUnknown) throw BadHash("unknown hash type '%s'", hts); if (type != htUnknown && type != this->type) throw BadHash("hash '%s' should have type '%s'", s, printHashType(type)); - pos = colon + 1; + pos = sep + 1; } init(); size_t size = s.size() - pos; - if (size == base16Len()) { + if (!isSRI && size == base16Len()) { auto parseHexDigit = [&](char c) { if (c >= '0' && c <= '9') return c - '0'; @@ -164,7 +170,7 @@ Hash::Hash(const std::string & s, HashType type) } } - else if (size == base32Len()) { + else if (!isSRI && size == base32Len()) { for (unsigned int n = 0; n < size; ++n) { char c = s[pos + size - n - 1]; @@ -187,10 +193,10 @@ Hash::Hash(const std::string & s, HashType type) } } - else if (size == base64Len()) { + else if (isSRI || size == base64Len()) { auto d = base64Decode(std::string(s, pos)); if (d.size() != hashSize) - throw BadHash("invalid base-64 hash '%s'", s); + throw BadHash("invalid %s hash '%s'", isSRI ? "SRI" : "base-64", s); assert(hashSize); memcpy(hash, d.data(), hashSize); } diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index fd7a61df8e4..2dbc3b63081 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -20,7 +20,7 @@ const int sha512HashSize = 64; extern const string base32Chars; -enum Base : int { Base64, Base32, Base16 }; +enum Base : int { Base64, Base32, Base16, SRI }; struct Hash @@ -38,8 +38,9 @@ struct Hash Hash(HashType type) : type(type) { init(); }; /* Initialize the hash from a string representation, in the format - "[:]". If the 'type' argument is - htUnknown, then the hash type must be specified in the + "[:]" or "-" (a + Subresource Integrity hash expression). If the 'type' argument + is htUnknown, then the hash type must be specified in the string. 
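The hash.cc changes above teach Hash to parse and print Subresource Integrity ("<type>-<base64>") expressions alongside the existing "<type>:<rest>" forms, via the new SRI base. A small sketch of the round trip, assuming the usual default arguments of Hash::to_string; the digest literal is simply the SHA-256 of the empty string, used here as an example:

    #include "hash.hh"

    using namespace nix;

    void sriRoundTrip()
    {
        // '-' instead of ':' marks an SRI expression, so the type is inferred from the string.
        Hash h("sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=", htUnknown);

        std::string sri = h.to_string(SRI);      // "sha256-..." (the type is always included)
        std::string b32 = h.to_string(Base32);   // the classic base-32 spelling
    }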
*/ Hash(const std::string & s, HashType type = htUnknown); diff --git a/src/libutil/json.cc b/src/libutil/json.cc index 813b257016e..0a6fb65f060 100644 --- a/src/libutil/json.cc +++ b/src/libutil/json.cc @@ -31,6 +31,7 @@ template<> void toJSON(std::ostream & str, const unsigned long & template<> void toJSON(std::ostream & str, const long long & n) { str << n; } template<> void toJSON(std::ostream & str, const unsigned long long & n) { str << n; } template<> void toJSON(std::ostream & str, const float & n) { str << n; } +template<> void toJSON(std::ostream & str, const double & n) { str << n; } template<> void toJSON(std::ostream & str, const std::string & s) { diff --git a/src/libutil/local.mk b/src/libutil/local.mk index 824f48fbfc9..3ccc23fd5c1 100644 --- a/src/libutil/local.mk +++ b/src/libutil/local.mk @@ -7,7 +7,3 @@ libutil_DIR := $(d) libutil_SOURCES := $(wildcard $(d)/*.cc) libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) -lboost_context - -libutil_LIBS = libformat - -libutil_CXXFLAGS = -DBROTLI=\"$(brotli)\" diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh index 0b142b0597c..d49067bb95d 100644 --- a/src/libutil/pool.hh +++ b/src/libutil/pool.hh @@ -97,6 +97,7 @@ public: private: Pool & pool; std::shared_ptr r; + bool bad = false; friend Pool; @@ -112,7 +113,8 @@ public: if (!r) return; { auto state_(pool.state.lock()); - state_->idle.push_back(ref(r)); + if (!bad) + state_->idle.push_back(ref(r)); assert(state_->inUse); state_->inUse--; } @@ -121,6 +123,8 @@ public: R * operator -> () { return &*r; } R & operator * () { return *r; } + + void markBad() { bad = true; } }; Handle get() diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 33ae1ea389d..0e75eeec2bf 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -133,7 +133,7 @@ size_t FdSource::readUnbuffered(unsigned char * data, size_t len) ssize_t n; do { checkInterrupt(); - n = ::read(fd, (char *) data, bufSize); + n = ::read(fd, (char *) data, len); } while (n == -1 && errno == EINTR); if (n == -1) { _good = false; throw SysError("reading from file"); } if (n == 0) { _good = false; throw EndOfFile("unexpected end-of-file"); } @@ -157,21 +157,25 @@ size_t StringSource::read(unsigned char * data, size_t len) } -std::unique_ptr sinkToSource(std::function fun) +#if BOOST_VERSION >= 106300 && BOOST_VERSION < 106600 +#error Coroutines are broken in this version of Boost! 
+#endif + +std::unique_ptr sinkToSource( + std::function fun, + std::function eof) { struct SinkToSource : Source { typedef boost::coroutines2::coroutine coro_t; - coro_t::pull_type coro; + std::function fun; + std::function eof; + std::experimental::optional coro; + bool started = false; - SinkToSource(std::function fun) - : coro([&](coro_t::push_type & yield) { - LambdaSink sink([&](const unsigned char * data, size_t len) { - if (len) yield(std::string((const char *) data, len)); - }); - fun(sink); - }) + SinkToSource(std::function fun, std::function eof) + : fun(fun), eof(eof) { } @@ -181,11 +185,18 @@ std::unique_ptr sinkToSource(std::function fun) size_t read(unsigned char * data, size_t len) override { if (!coro) - throw EndOfFile("coroutine has finished"); + coro = coro_t::pull_type([&](coro_t::push_type & yield) { + LambdaSink sink([&](const unsigned char * data, size_t len) { + if (len) yield(std::string((const char *) data, len)); + }); + fun(sink); + }); + + if (!*coro) { eof(); abort(); } if (pos == cur.size()) { - if (!cur.empty()) coro(); - cur = coro.get(); + if (!cur.empty()) (*coro)(); + cur = coro->get(); pos = 0; } @@ -197,7 +208,7 @@ std::unique_ptr sinkToSource(std::function fun) } }; - return std::make_unique(fun); + return std::make_unique(fun, eof); } @@ -261,16 +272,17 @@ void readPadding(size_t len, Source & source) size_t readString(unsigned char * buf, size_t max, Source & source) { auto len = readNum(source); - if (len > max) throw Error("string is too long"); + if (len > max) throw SerialisationError("string is too long"); source(buf, len); readPadding(len, source); return len; } -string readString(Source & source) +string readString(Source & source, size_t max) { auto len = readNum(source); + if (len > max) throw SerialisationError("string is too long"); std::string res(len, 0); source((unsigned char*) res.data(), len); readPadding(len, source); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index d0b4552e339..969e4dff383 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -77,10 +77,12 @@ struct BufferedSource : Source size_t read(unsigned char * data, size_t len) override; - /* Underlying read call, to be overridden. */ - virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; bool hasData(); + +protected: + /* Underlying read call, to be overridden. */ + virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0; }; @@ -134,8 +136,9 @@ struct FdSource : BufferedSource return *this; } - size_t readUnbuffered(unsigned char * data, size_t len) override; bool good() override; +protected: + size_t readUnbuffered(unsigned char * data, size_t len) override; private: bool _good = true; }; @@ -211,7 +214,11 @@ struct LambdaSource : Source /* Convert a function that feeds data into a Sink into a Source. The Source executes the function as a coroutine. 
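The reworked sinkToSource below constructs its coroutine lazily on the first read and accepts an optional eof callback that runs when the reader drains past the end of the produced data (the default still throws EndOfFile). Here is a sketch of how the new two-argument form combines with the streaming readFile/writeFile overloads added to util.cc further down in this diff; copyFileStreaming and its parameters are illustrative only:

    #include "serialise.hh"
    #include "util.hh"

    using namespace nix;

    void copyFileStreaming(const Path & from, const Path & to)
    {
        auto source = sinkToSource(
            [&](Sink & sink) {
                readFile(from, sink);      // producer: pushes the file contents into the sink
            },
            []() {
                // optional second argument, replacing the default
                // EndOfFile("coroutine has finished") behaviour
                throw EndOfFile("copy finished");
            });

        writeFile(to, *source);            // consumer: pulls from the Source until EndOfFile
    }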
*/ -std::unique_ptr sinkToSource(std::function fun); +std::unique_ptr sinkToSource( + std::function fun, + std::function eof = []() { + throw EndOfFile("coroutine has finished"); + }); void writePadding(size_t len, Sink & sink); @@ -227,7 +234,7 @@ inline Sink & operator << (Sink & sink, uint64_t n) buf[4] = (n >> 32) & 0xff; buf[5] = (n >> 40) & 0xff; buf[6] = (n >> 48) & 0xff; - buf[7] = (n >> 56) & 0xff; + buf[7] = (unsigned char) (n >> 56) & 0xff; sink(buf, sizeof(buf)); return sink; } @@ -259,7 +266,7 @@ T readNum(Source & source) if (n > std::numeric_limits::max()) throw SerialisationError("serialised integer %d is too large for type '%s'", n, typeid(T).name()); - return n; + return (T) n; } @@ -277,7 +284,7 @@ inline uint64_t readLongLong(Source & source) void readPadding(size_t len, Source & source); size_t readString(unsigned char * buf, size_t max, Source & source); -string readString(Source & source); +string readString(Source & source, size_t max = std::numeric_limits::max()); template T readStrings(Source & source); Source & operator >> (Source & in, string & s); diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh index 3b2710f6fd3..e1d591d77a8 100644 --- a/src/libutil/sync.hh +++ b/src/libutil/sync.hh @@ -57,11 +57,11 @@ public: } template - void wait_for(std::condition_variable & cv, + std::cv_status wait_for(std::condition_variable & cv, const std::chrono::duration & duration) { assert(s); - cv.wait_for(lk, duration); + return cv.wait_for(lk, duration); } template diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 15962236ec6..7eca35577b0 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -167,7 +167,7 @@ Path dirOf(const Path & path) { Path::size_type pos = path.rfind('/'); if (pos == string::npos) - throw Error(format("invalid file name '%1%'") % path); + return "."; return pos == 0 ? 
"/" : Path(path, 0, pos); } @@ -202,7 +202,7 @@ bool isInDir(const Path & path, const Path & dir) bool isDirOrInDir(const Path & path, const Path & dir) { - return path == dir or isInDir(path, dir); + return path == dir || isInDir(path, dir); } @@ -311,6 +311,14 @@ string readFile(const Path & path, bool drain) } +void readFile(const Path & path, Sink & sink) +{ + AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) throw SysError("opening file '%s'", path); + drainFD(fd.get(), sink); +} + + void writeFile(const Path & path, const string & s, mode_t mode) { AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); @@ -320,6 +328,23 @@ void writeFile(const Path & path, const string & s, mode_t mode) } +void writeFile(const Path & path, Source & source, mode_t mode) +{ + AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); + if (!fd) + throw SysError(format("opening file '%1%'") % path); + + std::vector buf(64 * 1024); + + while (true) { + try { + auto n = source.read(buf.data(), buf.size()); + writeFull(fd.get(), (unsigned char *) buf.data(), n); + } catch (EndOfFile &) { break; } + } +} + + string readLine(int fd) { string s; @@ -443,7 +468,7 @@ static Lazy getHome2([]() { std::vector buf(16384); struct passwd pwbuf; struct passwd * pw; - if (getpwuid_r(getuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 + if (getpwuid_r(geteuid(), &pwbuf, buf.data(), buf.size(), &pw) != 0 || !pw || !pw->pw_dir || !pw->pw_dir[0]) throw Error("cannot determine user's home directory"); homeDir = pw->pw_dir; @@ -471,6 +496,15 @@ Path getConfigDir() return configDir; } +std::vector getConfigDirs() +{ + Path configHome = getConfigDir(); + string configDirs = getEnv("XDG_CONFIG_DIRS"); + std::vector result = tokenizeString>(configDirs, ":"); + result.insert(result.begin(), configHome); + return result; +} + Path getDataDir() { @@ -593,7 +627,7 @@ void drainFD(int fd, Sink & sink, bool block) throw SysError("making file descriptor non-blocking"); } - std::vector buf(4096); + std::vector buf(64 * 1024); while (1) { checkInterrupt(); ssize_t rd = read(fd, buf.data(), buf.size()); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 743d238611f..bda87bee433 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -15,6 +15,7 @@ #include #include #include +#include #ifndef HAVE_STRUCT_DIRENT_D_TYPE #define DT_UNKNOWN 0 @@ -97,10 +98,13 @@ unsigned char getFileType(const Path & path); /* Read the contents of a file into a string. */ string readFile(int fd); string readFile(const Path & path, bool drain = false); +void readFile(const Path & path, Sink & sink); /* Write a string to a file. */ void writeFile(const Path & path, const string & s, mode_t mode = 0666); +void writeFile(const Path & path, Source & source, mode_t mode = 0666); + /* Read a line from a file descriptor. */ string readLine(int fd); @@ -127,6 +131,9 @@ Path getCacheDir(); /* Return $XDG_CONFIG_HOME or $HOME/.config. */ Path getConfigDir(); +/* Return the directories to search for user configuration files */ +std::vector getConfigDirs(); + /* Return $XDG_DATA_HOME or $HOME/.local/share. */ Path getDataDir(); @@ -424,44 +431,30 @@ string get(const T & map, const string & key, const string & def = "") } -/* Call ‘failure’ with the current exception as argument. If ‘failure’ - throws an exception, abort the program. 
*/ -void callFailure(const std::function & failure, - std::exception_ptr exc = std::current_exception()); +/* A callback is a wrapper around a lambda that accepts a valid of + type T or an exception. (We abuse std::future to pass the value or + exception.) */ +template +struct Callback +{ + std::function)> fun; + Callback(std::function)> fun) : fun(fun) { } -/* Evaluate the function ‘f’. If it returns a value, call ‘success’ - with that value as its argument. If it or ‘success’ throws an - exception, call ‘failure’. If ‘failure’ throws an exception, abort - the program. */ -template -void sync2async( - const std::function & success, - const std::function & failure, - const std::function & f) -{ - try { - success(f()); - } catch (...) { - callFailure(failure); + void operator()(T && t) const + { + std::promise promise; + promise.set_value(std::move(t)); + fun(promise.get_future()); } -} - -/* Call the function ‘success’. If it throws an exception, call - ‘failure’. If that throws an exception, abort the program. */ -template -void callSuccess( - const std::function & success, - const std::function & failure, - T && arg) -{ - try { - success(arg); - } catch (...) { - callFailure(failure); + void rethrow(const std::exception_ptr & exc = std::current_exception()) const + { + std::promise promise; + promise.set_exception(exc); + fun(promise.get_future()); } -} +}; /* Start a thread that handles various signals. Also block those signals diff --git a/src/libutil/xml-writer.cc b/src/libutil/xml-writer.cc index 98bd058d18b..e5cc2e9fc71 100644 --- a/src/libutil/xml-writer.cc +++ b/src/libutil/xml-writer.cc @@ -28,7 +28,7 @@ void XMLWriter::close() } -void XMLWriter::indent_(unsigned int depth) +void XMLWriter::indent_(size_t depth) { if (!indent) return; output << string(depth * 2, ' '); @@ -75,7 +75,7 @@ void XMLWriter::writeAttrs(const XMLAttrs & attrs) { for (auto & i : attrs) { output << " " << i.first << "=\""; - for (unsigned int j = 0; j < i.second.size(); ++j) { + for (size_t j = 0; j < i.second.size(); ++j) { char c = i.second[j]; if (c == '"') output << """; else if (c == '<') output << "<"; diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh index 3cefe3712c0..b98b445265a 100644 --- a/src/libutil/xml-writer.hh +++ b/src/libutil/xml-writer.hh @@ -44,7 +44,7 @@ public: private: void writeAttrs(const XMLAttrs & attrs); - void indent_(unsigned int depth); + void indent_(size_t depth); }; diff --git a/src/linenoise/ConvertUTF.cpp b/src/linenoise/ConvertUTF.cpp deleted file mode 100644 index f7e5915d5e8..00000000000 --- a/src/linenoise/ConvertUTF.cpp +++ /dev/null @@ -1,542 +0,0 @@ -/* - * Copyright 2001-2004 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. 
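Returning to the util.hh change above: the removed callFailure/sync2async/callSuccess helpers are replaced by the new Callback<T> wrapper, which hands either a value or an exception to a continuation through a std::future. A sketch of the calling convention it enables; fetchSomething is a hypothetical asynchronous API invented for illustration, not something defined in this change:

    #include "util.hh"
    #include <future>
    #include <string>

    using namespace nix;

    // Producer side: complete the callback with a value, or forward the in-flight exception.
    void fetchSomething(Callback<std::string> callback)
    {
        try {
            std::string result = "computed value";   // the actual work would happen here
            callback(std::move(result));
        } catch (...) {
            callback.rethrow();
        }
    }

    // Consumer side: future::get() rethrows whatever exception was stored.
    void caller()
    {
        fetchSomething(Callback<std::string>([](std::future<std::string> fut) {
            try {
                auto value = fut.get();
            } catch (std::exception & e) {
                // handle the failure
            }
        }));
    }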
- */ - -/* --------------------------------------------------------------------- - - Conversions between UTF32, UTF-16, and UTF-8. Source code file. - Author: Mark E. Davis, 1994. - Rev History: Rick McGowan, fixes & updates May 2001. - Sept 2001: fixed const & error conditions per - mods suggested by S. Parent & A. Lillich. - June 2002: Tim Dodd added detection and handling of incomplete - source sequences, enhanced error detection, added casts - to eliminate compiler warnings. - July 2003: slight mods to back out aggressive FFFE detection. - Jan 2004: updated switches in from-UTF8 conversions. - Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions. - - See the header file "ConvertUTF.h" for complete documentation. - ------------------------------------------------------------------------- */ - -#include "ConvertUTF.h" -#ifdef CVTUTF_DEBUG -#include -#endif - -namespace linenoise_ng { - -static const int halfShift = 10; /* used for shifting by 10 bits */ - -static const UTF32 halfBase = 0x0010000UL; -static const UTF32 halfMask = 0x3FFUL; - -#define UNI_SUR_HIGH_START (UTF32)0xD800 -#define UNI_SUR_HIGH_END (UTF32)0xDBFF -#define UNI_SUR_LOW_START (UTF32)0xDC00 -#define UNI_SUR_LOW_END (UTF32)0xDFFF -#define false 0 -#define true 1 - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF32toUTF16 ( - const UTF32** sourceStart, const UTF32* sourceEnd, - char16_t** targetStart, char16_t* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF32* source = *sourceStart; - char16_t* target = *targetStart; - while (source < sourceEnd) { - UTF32 ch; - if (target >= targetEnd) { - result = targetExhausted; break; - } - ch = *source++; - if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ - /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { - if (flags == strictConversion) { - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } else { - *target++ = UNI_REPLACEMENT_CHAR; - } - } else { - *target++ = (UTF16)ch; /* normal case */ - } - } else if (ch > UNI_MAX_LEGAL_UTF32) { - if (flags == strictConversion) { - result = sourceIllegal; - } else { - *target++ = UNI_REPLACEMENT_CHAR; - } - } else { - /* target is a character in range 0xFFFF - 0x10FFFF. */ - if (target + 1 >= targetEnd) { - --source; /* Back up source pointer! */ - result = targetExhausted; break; - } - ch -= halfBase; - *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); - *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); - } - } - *sourceStart = source; - *targetStart = target; - return result; -} - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF16toUTF32 ( - const UTF16** sourceStart, const UTF16* sourceEnd, - UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF16* source = *sourceStart; - UTF32* target = *targetStart; - UTF32 ch, ch2; - while (source < sourceEnd) { - const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ - ch = *source++; - /* If we have a surrogate pair, convert to UTF32 first. */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { - /* If the 16 bits following the high surrogate are in the source buffer... 
*/ - if (source < sourceEnd) { - ch2 = *source; - /* If it's a low surrogate, convert to UTF32. */ - if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { - ch = ((ch - UNI_SUR_HIGH_START) << halfShift) - + (ch2 - UNI_SUR_LOW_START) + halfBase; - ++source; - } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } - } else { /* We don't have the 16 bits following the high surrogate. */ - --source; /* return to the high surrogate */ - result = sourceExhausted; - break; - } - } else if (flags == strictConversion) { - /* UTF-16 surrogate values are illegal in UTF-32 */ - if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } - } - if (target >= targetEnd) { - source = oldSource; /* Back up source pointer! */ - result = targetExhausted; break; - } - *target++ = ch; - } - *sourceStart = source; - *targetStart = target; -#ifdef CVTUTF_DEBUG -if (result == sourceIllegal) { - fprintf(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2); - fflush(stderr); -} -#endif - return result; -} - -/* --------------------------------------------------------------------- */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. - * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is - * left as-is for anyone who may want to do such conversion, which was - * allowed in earlier algorithms. - */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* - * Magic values subtracted from a buffer value during UTF8 conversion. - * This table contains as many values as there might be trailing bytes - * in a UTF-8 sequence. - */ -static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, - 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; - -/* - * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed - * into the first byte, depending on how many bytes follow. There are - * as many entries in this table as there are UTF-8 sequence types. - * (I.e., one byte sequence, two byte... etc.). Remember that sequencs - * for *legal* UTF-8 will be 4 or fewer bytes total. - */ -static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; - -/* --------------------------------------------------------------------- */ - -/* The interface converts a whole buffer to avoid function-call overhead. - * Constants have been gathered. Loops & conditionals have been removed as - * much as possible for efficiency, in favor of drop-through switches. - * (See "Note A" at the bottom of the file for equivalent code.) - * If your compiler supports it, the "isLegalUTF8" call can be turned - * into an inline function. 
- */ - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF16toUTF8 ( - const UTF16** sourceStart, const UTF16* sourceEnd, - UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF16* source = *sourceStart; - UTF8* target = *targetStart; - while (source < sourceEnd) { - UTF32 ch; - unsigned short bytesToWrite = 0; - const UTF32 byteMask = 0xBF; - const UTF32 byteMark = 0x80; - const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */ - ch = *source++; - /* If we have a surrogate pair, convert to UTF32 first. */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { - /* If the 16 bits following the high surrogate are in the source buffer... */ - if (source < sourceEnd) { - UTF32 ch2 = *source; - /* If it's a low surrogate, convert to UTF32. */ - if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { - ch = ((ch - UNI_SUR_HIGH_START) << halfShift) - + (ch2 - UNI_SUR_LOW_START) + halfBase; - ++source; - } else if (flags == strictConversion) { /* it's an unpaired high surrogate */ - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } - } else { /* We don't have the 16 bits following the high surrogate. */ - --source; /* return to the high surrogate */ - result = sourceExhausted; - break; - } - } else if (flags == strictConversion) { - /* UTF-16 surrogate values are illegal in UTF-32 */ - if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } - } - /* Figure out how many bytes the result will require */ - if (ch < (UTF32)0x80) { bytesToWrite = 1; - } else if (ch < (UTF32)0x800) { bytesToWrite = 2; - } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; - } else if (ch < (UTF32)0x110000) { bytesToWrite = 4; - } else { bytesToWrite = 3; - ch = UNI_REPLACEMENT_CHAR; - } - - target += bytesToWrite; - if (target > targetEnd) { - source = oldSource; /* Back up source pointer! */ - target -= bytesToWrite; result = targetExhausted; break; - } - switch (bytesToWrite) { /* note: everything falls through. */ - case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 1: *--target = (UTF8)(ch | firstByteMark[bytesToWrite]); - } - target += bytesToWrite; - } - *sourceStart = source; - *targetStart = target; - return result; -} - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * If not calling this from ConvertUTF8to*, then the length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns false. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ - -static Boolean isLegalUTF8(const UTF8 *source, int length) { - UTF8 a; - const UTF8 *srcptr = source+length; - switch (length) { - default: return false; - /* Everything else falls through when "true"... 
*/ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; - case 2: if ((a = (*--srcptr)) > 0xBF) return false; - - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return false; break; - case 0xED: if (a > 0x9F) return false; break; - case 0xF0: if (a < 0x90) return false; break; - case 0xF4: if (a > 0x8F) return false; break; - default: if (a < 0x80) return false; - } - - case 1: if (*source >= 0x80 && *source < 0xC2) return false; - } - if (*source > 0xF4) return false; - return true; -} - -/* --------------------------------------------------------------------- */ - -/* - * Exported function to return whether a UTF-8 sequence is legal or not. - * This is not used here; it's just exported. - */ -Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) { - int length = trailingBytesForUTF8[*source]+1; - if (source+length > sourceEnd) { - return false; - } - return isLegalUTF8(source, length); -} - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF8toUTF16 ( - const UTF8** sourceStart, const UTF8* sourceEnd, - UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF8* source = *sourceStart; - UTF16* target = *targetStart; - while (source < sourceEnd) { - UTF32 ch = 0; - unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; - if (source + extraBytesToRead >= sourceEnd) { - result = sourceExhausted; break; - } - /* Do this check whether lenient or strict */ - if (! isLegalUTF8(source, extraBytesToRead+1)) { - result = sourceIllegal; - break; - } - /* - * The cases all fall through. See "Note A" below. - */ - switch (extraBytesToRead) { - case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ - case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ - case 3: ch += *source++; ch <<= 6; - case 2: ch += *source++; ch <<= 6; - case 1: ch += *source++; ch <<= 6; - case 0: ch += *source++; - } - ch -= offsetsFromUTF8[extraBytesToRead]; - - if (target >= targetEnd) { - source -= (extraBytesToRead+1); /* Back up source pointer! */ - result = targetExhausted; break; - } - if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ - /* UTF-16 surrogate values are illegal in UTF-32 */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { - if (flags == strictConversion) { - source -= (extraBytesToRead+1); /* return to the illegal value itself */ - result = sourceIllegal; - break; - } else { - *target++ = UNI_REPLACEMENT_CHAR; - } - } else { - *target++ = (UTF16)ch; /* normal case */ - } - } else if (ch > UNI_MAX_UTF16) { - if (flags == strictConversion) { - result = sourceIllegal; - source -= (extraBytesToRead+1); /* return to the start */ - break; /* Bail out; shouldn't continue */ - } else { - *target++ = UNI_REPLACEMENT_CHAR; - } - } else { - /* target is a character in range 0xFFFF - 0x10FFFF. */ - if (target + 1 >= targetEnd) { - source -= (extraBytesToRead+1); /* Back up source pointer! 
*/ - result = targetExhausted; break; - } - ch -= halfBase; - *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START); - *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START); - } - } - *sourceStart = source; - *targetStart = target; - return result; -} - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF32toUTF8 ( - const UTF32** sourceStart, const UTF32* sourceEnd, - UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF32* source = *sourceStart; - UTF8* target = *targetStart; - while (source < sourceEnd) { - UTF32 ch; - unsigned short bytesToWrite = 0; - const UTF32 byteMask = 0xBF; - const UTF32 byteMark = 0x80; - ch = *source++; - if (flags == strictConversion ) { - /* UTF-16 surrogate values are illegal in UTF-32 */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { - --source; /* return to the illegal value itself */ - result = sourceIllegal; - break; - } - } - /* - * Figure out how many bytes the result will require. Turn any - * illegally large UTF32 things (> Plane 17) into replacement chars. - */ - if (ch < (UTF32)0x80) { bytesToWrite = 1; - } else if (ch < (UTF32)0x800) { bytesToWrite = 2; - } else if (ch < (UTF32)0x10000) { bytesToWrite = 3; - } else if (ch <= UNI_MAX_LEGAL_UTF32) { bytesToWrite = 4; - } else { bytesToWrite = 3; - ch = UNI_REPLACEMENT_CHAR; - result = sourceIllegal; - } - - target += bytesToWrite; - if (target > targetEnd) { - --source; /* Back up source pointer! */ - target -= bytesToWrite; result = targetExhausted; break; - } - switch (bytesToWrite) { /* note: everything falls through. */ - case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6; - case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); - } - target += bytesToWrite; - } - *sourceStart = source; - *targetStart = target; - return result; -} - -/* --------------------------------------------------------------------- */ - -ConversionResult ConvertUTF8toUTF32 ( - const UTF8** sourceStart, const UTF8* sourceEnd, - UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) { - ConversionResult result = conversionOK; - const UTF8* source = *sourceStart; - UTF32* target = *targetStart; - while (source < sourceEnd) { - UTF32 ch = 0; - unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; - if (source + extraBytesToRead >= sourceEnd) { - result = sourceExhausted; break; - } - /* Do this check whether lenient or strict */ - if (! isLegalUTF8(source, extraBytesToRead+1)) { - result = sourceIllegal; - break; - } - /* - * The cases all fall through. See "Note A" below. - */ - switch (extraBytesToRead) { - case 5: ch += *source++; ch <<= 6; - case 4: ch += *source++; ch <<= 6; - case 3: ch += *source++; ch <<= 6; - case 2: ch += *source++; ch <<= 6; - case 1: ch += *source++; ch <<= 6; - case 0: ch += *source++; - } - ch -= offsetsFromUTF8[extraBytesToRead]; - - if (target >= targetEnd) { - source -= (extraBytesToRead+1); /* Back up the source pointer! */ - result = targetExhausted; break; - } - if (ch <= UNI_MAX_LEGAL_UTF32) { - /* - * UTF-16 surrogate values are illegal in UTF-32, and anything - * over Plane 17 (> 0x10FFFF) is illegal. 
- */ - if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { - if (flags == strictConversion) { - source -= (extraBytesToRead+1); /* return to the illegal value itself */ - result = sourceIllegal; - break; - } else { - *target++ = UNI_REPLACEMENT_CHAR; - } - } else { - *target++ = ch; - } - } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */ - result = sourceIllegal; - *target++ = UNI_REPLACEMENT_CHAR; - } - } - *sourceStart = source; - *targetStart = target; - return result; -} - -} - -/* --------------------------------------------------------------------- - - Note A. - The fall-through switches in UTF-8 reading code save a - temp variable, some decrements & conditionals. The switches - are equivalent to the following loop: - { - int tmpBytesToRead = extraBytesToRead+1; - do { - ch += *source++; - --tmpBytesToRead; - if (tmpBytesToRead) ch <<= 6; - } while (tmpBytesToRead > 0); - } - In UTF-8 writing code, the switches on "bytesToWrite" are - similarly unrolled loops. - - --------------------------------------------------------------------- */ diff --git a/src/linenoise/ConvertUTF.h b/src/linenoise/ConvertUTF.h deleted file mode 100755 index 8a296235dcd..00000000000 --- a/src/linenoise/ConvertUTF.h +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2001-2004 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* --------------------------------------------------------------------- - - Conversions between UTF32, UTF-16, and UTF-8. Header file. - - Several funtions are included here, forming a complete set of - conversions between the three formats. UTF-7 is not included - here, but is handled in a separate source file. - - Each of these routines takes pointers to input buffers and output - buffers. The input buffers are const. - - Each routine converts the text between *sourceStart and sourceEnd, - putting the result into the buffer between *targetStart and - targetEnd. Note: the end pointers are *after* the last item: e.g. - *(sourceEnd - 1) is the last item. - - The return result indicates whether the conversion was successful, - and if not, whether the problem was in the source or target buffers. - (Only the first encountered problem is indicated.) - - After the conversion, *sourceStart and *targetStart are both - updated to point to the end of last text successfully converted in - the respective buffers. - - Input parameters: - sourceStart - pointer to a pointer to the source buffer. - The contents of this are modified on return so that - it points at the next thing to be converted. - targetStart - similarly, pointer to pointer to the target buffer. - sourceEnd, targetEnd - respectively pointers to the ends of the - two buffers, for overflow checking only. 
- - These conversion functions take a ConversionFlags argument. When this - flag is set to strict, both irregular sequences and isolated surrogates - will cause an error. When the flag is set to lenient, both irregular - sequences and isolated surrogates are converted. - - Whether the flag is strict or lenient, all illegal sequences will cause - an error return. This includes sequences such as: , , - or in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code - must check for illegal sequences. - - When the flag is set to lenient, characters over 0x10FFFF are converted - to the replacement character; otherwise (when the flag is set to strict) - they constitute an error. - - Output parameters: - The value "sourceIllegal" is returned from some routines if the input - sequence is malformed. When "sourceIllegal" is returned, the source - value will point to the illegal value that caused the problem. E.g., - in UTF-8 when a sequence is malformed, it points to the start of the - malformed sequence. - - Author: Mark E. Davis, 1994. - Rev History: Rick McGowan, fixes & updates May 2001. - Fixes & updates, Sept 2001. - ------------------------------------------------------------------------- */ - -/* --------------------------------------------------------------------- - The following 4 definitions are compiler-specific. - The C standard does not guarantee that wchar_t has at least - 16 bits, so wchar_t is no less portable than unsigned short! - All should be unsigned values to avoid sign extension during - bit mask & shift operations. ------------------------------------------------------------------------- */ - -#if 0 -typedef unsigned long UTF32; /* at least 32 bits */ -typedef unsigned short UTF16; /* at least 16 bits */ -typedef unsigned char UTF8; /* typically 8 bits */ -#endif - -#include -#include - -namespace linenoise_ng { - -typedef uint32_t UTF32; -typedef uint16_t UTF16; -typedef uint8_t UTF8; -typedef unsigned char Boolean; /* 0 or 1 */ - -/* Some fundamental constants */ -#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD -#define UNI_MAX_BMP (UTF32)0x0000FFFF -#define UNI_MAX_UTF16 (UTF32)0x0010FFFF -#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF -#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF - -typedef enum { - conversionOK, /* conversion successful */ - sourceExhausted, /* partial character in source, but hit end */ - targetExhausted, /* insuff. 
room in target for conversion */ - sourceIllegal /* source sequence is illegal/malformed */ -} ConversionResult; - -typedef enum { - strictConversion = 0, - lenientConversion -} ConversionFlags; - -// /* This is for C++ and does no harm in C */ -// #ifdef __cplusplus -// extern "C" { -// #endif - -ConversionResult ConvertUTF8toUTF16 ( - const UTF8** sourceStart, const UTF8* sourceEnd, - UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags); - -ConversionResult ConvertUTF16toUTF8 ( - const UTF16** sourceStart, const UTF16* sourceEnd, - UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags); - -ConversionResult ConvertUTF8toUTF32 ( - const UTF8** sourceStart, const UTF8* sourceEnd, - UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); - -ConversionResult ConvertUTF32toUTF8 ( - const UTF32** sourceStart, const UTF32* sourceEnd, - UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags); - -ConversionResult ConvertUTF16toUTF32 ( - const UTF16** sourceStart, const UTF16* sourceEnd, - UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags); - -ConversionResult ConvertUTF32toUTF16 ( - const UTF32** sourceStart, const UTF32* sourceEnd, - char16_t** targetStart, char16_t* targetEnd, ConversionFlags flags); - -Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd); - -// #ifdef __cplusplus -// } -// #endif - -} - -/* --------------------------------------------------------------------- */ diff --git a/src/linenoise/LICENSE b/src/linenoise/LICENSE deleted file mode 100644 index b7c58c44586..00000000000 --- a/src/linenoise/LICENSE +++ /dev/null @@ -1,66 +0,0 @@ -linenoise.cpp -============= - -Copyright (c) 2010, Salvatore Sanfilippo -Copyright (c) 2010, Pieter Noordhuis - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Redis nor the names of its contributors may be used - to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - - -wcwidth.cpp -=========== - -Markus Kuhn -- 2007-05-26 (Unicode 5.0) - -Permission to use, copy, modify, and distribute this software -for any purpose and without fee is hereby granted. The author -disclaims all warranties with regard to this software. - - - -ConvertUTF.cpp -============== - -Copyright 2001-2004 Unicode, Inc. 
- -Disclaimer - -This source code is provided as is by Unicode, Inc. No claims are -made as to fitness for any particular purpose. No warranties of any -kind are expressed or implied. The recipient agrees to determine -applicability of information provided. If this file has been -purchased on magnetic or optical media from Unicode, Inc., the -sole remedy for any claim will be exchange of defective media -within 90 days of receipt. - -Limitations on Rights to Redistribute This Code - -Unicode, Inc. hereby grants the right to freely use the information -supplied in this file in the creation of products supporting the -Unicode Standard, and to make copies of this file in any form -for internal or external distribution as long as this notice -remains attached. diff --git a/src/linenoise/linenoise.cpp b/src/linenoise/linenoise.cpp deleted file mode 100644 index c57505d2fa9..00000000000 --- a/src/linenoise/linenoise.cpp +++ /dev/null @@ -1,3450 +0,0 @@ -/* linenoise.c -- guerrilla line editing library against the idea that a - * line editing lib needs to be 20,000 lines of C code. - * - * Copyright (c) 2010, Salvatore Sanfilippo - * Copyright (c) 2010, Pieter Noordhuis - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - * line editing lib needs to be 20,000 lines of C code. - * - * You can find the latest source code at: - * - * http://github.com/antirez/linenoise - * - * Does a number of crazy assumptions that happen to be true in 99.9999% of - * the 2010 UNIX computers around. - * - * References: - * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html - * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html - * - * Todo list: - * - Switch to gets() if $TERM is something we can't support. - * - Filter bogus Ctrl+ combinations. - * - Win32 support - * - * Bloat: - * - Completion? - * - History search like Ctrl+r in readline? - * - * List of escape sequences used by this program, we do everything just - * with three sequences. 
In order to be so cheap we may have some - * flickering effect with some slow terminal, but the lesser sequences - * the more compatible. - * - * CHA (Cursor Horizontal Absolute) - * Sequence: ESC [ n G - * Effect: moves cursor to column n (1 based) - * - * EL (Erase Line) - * Sequence: ESC [ n K - * Effect: if n is 0 or missing, clear from cursor to end of line - * Effect: if n is 1, clear from beginning of line to cursor - * Effect: if n is 2, clear entire line - * - * CUF (Cursor Forward) - * Sequence: ESC [ n C - * Effect: moves cursor forward of n chars - * - * The following are used to clear the screen: ESC [ H ESC [ 2 J - * This is actually composed of two sequences: - * - * cursorhome - * Sequence: ESC [ H - * Effect: moves the cursor to upper left corner - * - * ED2 (Clear entire screen) - * Sequence: ESC [ 2 J - * Effect: clear the whole screen - * - */ - -#ifdef _WIN32 - -#include -#include -#include - -#if defined(_MSC_VER) && _MSC_VER < 1900 -#define snprintf _snprintf // Microsoft headers use underscores in some names -#endif - -#if !defined GNUC -#define strcasecmp _stricmp -#endif - -#define strdup _strdup -#define isatty _isatty -#define write _write -#define STDIN_FILENO 0 - -#else /* _WIN32 */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#endif /* _WIN32 */ - -#include -#include -#include - -#include "linenoise.h" -#include "ConvertUTF.h" - -#include -#include -#include - -using std::string; -using std::vector; -using std::unique_ptr; -using namespace linenoise_ng; - -typedef unsigned char char8_t; - -static ConversionResult copyString8to32(char32_t* dst, size_t dstSize, - size_t& dstCount, const char* src) { - const UTF8* sourceStart = reinterpret_cast(src); - const UTF8* sourceEnd = sourceStart + strlen(src); - UTF32* targetStart = reinterpret_cast(dst); - UTF32* targetEnd = targetStart + dstSize; - - ConversionResult res = ConvertUTF8toUTF32( - &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); - - if (res == conversionOK) { - dstCount = targetStart - reinterpret_cast(dst); - - if (dstCount < dstSize) { - *targetStart = 0; - } - } - - return res; -} - -static ConversionResult copyString8to32(char32_t* dst, size_t dstSize, - size_t& dstCount, const char8_t* src) { - return copyString8to32(dst, dstSize, dstCount, - reinterpret_cast(src)); -} - -static size_t strlen32(const char32_t* str) { - const char32_t* ptr = str; - - while (*ptr) { - ++ptr; - } - - return ptr - str; -} - -static size_t strlen8(const char8_t* str) { - return strlen(reinterpret_cast(str)); -} - -static char8_t* strdup8(const char* src) { - return reinterpret_cast(strdup(src)); -} - -#ifdef _WIN32 -static const int FOREGROUND_WHITE = - FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE; -static const int BACKGROUND_WHITE = - BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE; -static const int INTENSITY = FOREGROUND_INTENSITY | BACKGROUND_INTENSITY; - -class WinAttributes { - public: - WinAttributes() { - CONSOLE_SCREEN_BUFFER_INFO info; - GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &info); - _defaultAttribute = info.wAttributes & INTENSITY; - _defaultColor = info.wAttributes & FOREGROUND_WHITE; - _defaultBackground = info.wAttributes & BACKGROUND_WHITE; - - _consoleAttribute = _defaultAttribute; - _consoleColor = _defaultColor | _defaultBackground; - } - - public: - int _defaultAttribute; - int _defaultColor; - int _defaultBackground; - - int _consoleAttribute; - int _consoleColor; -}; - -static WinAttributes 
WIN_ATTR; - -static void copyString32to16(char16_t* dst, size_t dstSize, size_t* dstCount, - const char32_t* src, size_t srcSize) { - const UTF32* sourceStart = reinterpret_cast(src); - const UTF32* sourceEnd = sourceStart + srcSize; - char16_t* targetStart = reinterpret_cast(dst); - char16_t* targetEnd = targetStart + dstSize; - - ConversionResult res = ConvertUTF32toUTF16( - &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); - - if (res == conversionOK) { - *dstCount = targetStart - reinterpret_cast(dst); - - if (*dstCount < dstSize) { - *targetStart = 0; - } - } -} -#endif - -static void copyString32to8(char* dst, size_t dstSize, size_t* dstCount, - const char32_t* src, size_t srcSize) { - const UTF32* sourceStart = reinterpret_cast(src); - const UTF32* sourceEnd = sourceStart + srcSize; - UTF8* targetStart = reinterpret_cast(dst); - UTF8* targetEnd = targetStart + dstSize; - - ConversionResult res = ConvertUTF32toUTF8( - &sourceStart, sourceEnd, &targetStart, targetEnd, lenientConversion); - - if (res == conversionOK) { - *dstCount = targetStart - reinterpret_cast(dst); - - if (*dstCount < dstSize) { - *targetStart = 0; - } - } -} - -static void copyString32to8(char* dst, size_t dstLen, const char32_t* src) { - size_t dstCount = 0; - copyString32to8(dst, dstLen, &dstCount, src, strlen32(src)); -} - -static void copyString32(char32_t* dst, const char32_t* src, size_t len) { - while (0 < len && *src) { - *dst++ = *src++; - --len; - } - - *dst = 0; -} - -static int strncmp32(const char32_t* left, const char32_t* right, size_t len) { - while (0 < len && *left) { - if (*left != *right) { - return *left - *right; - } - - ++left; - ++right; - --len; - } - - return 0; -} - -#ifdef _WIN32 -#include - -static size_t OutputWin(char16_t* text16, char32_t* text32, size_t len32) { - size_t count16 = 0; - - copyString32to16(text16, len32, &count16, text32, len32); - WriteConsoleW(GetStdHandle(STD_OUTPUT_HANDLE), text16, - static_cast(count16), nullptr, nullptr); - - return count16; -} - -static char32_t* HandleEsc(char32_t* p, char32_t* end) { - if (*p == '[') { - int code = 0; - - for (++p; p < end; ++p) { - char32_t c = *p; - - if ('0' <= c && c <= '9') { - code = code * 10 + (c - '0'); - } else if (c == 'm' || c == ';') { - switch (code) { - case 0: - WIN_ATTR._consoleAttribute = WIN_ATTR._defaultAttribute; - WIN_ATTR._consoleColor = - WIN_ATTR._defaultColor | WIN_ATTR._defaultBackground; - break; - - case 1: // BOLD - case 5: // BLINK - WIN_ATTR._consoleAttribute = - (WIN_ATTR._defaultAttribute ^ FOREGROUND_INTENSITY) & INTENSITY; - break; - - case 30: - WIN_ATTR._consoleColor = BACKGROUND_WHITE; - break; - - case 31: - WIN_ATTR._consoleColor = - FOREGROUND_RED | WIN_ATTR._defaultBackground; - break; - - case 32: - WIN_ATTR._consoleColor = - FOREGROUND_GREEN | WIN_ATTR._defaultBackground; - break; - - case 33: - WIN_ATTR._consoleColor = - FOREGROUND_RED | FOREGROUND_GREEN | WIN_ATTR._defaultBackground; - break; - - case 34: - WIN_ATTR._consoleColor = - FOREGROUND_BLUE | WIN_ATTR._defaultBackground; - break; - - case 35: - WIN_ATTR._consoleColor = - FOREGROUND_BLUE | FOREGROUND_RED | WIN_ATTR._defaultBackground; - break; - - case 36: - WIN_ATTR._consoleColor = FOREGROUND_BLUE | FOREGROUND_GREEN | - WIN_ATTR._defaultBackground; - break; - - case 37: - WIN_ATTR._consoleColor = FOREGROUND_GREEN | FOREGROUND_RED | - FOREGROUND_BLUE | - WIN_ATTR._defaultBackground; - break; - } - - code = 0; - } - - if (*p == 'm') { - ++p; - break; - } - } - } else { - ++p; - } - - auto handle = 
GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(handle, - WIN_ATTR._consoleAttribute | WIN_ATTR._consoleColor); - - return p; -} - -static size_t WinWrite32(char16_t* text16, char32_t* text32, size_t len32) { - char32_t* p = text32; - char32_t* q = p; - char32_t* e = text32 + len32; - size_t count16 = 0; - - while (p < e) { - if (*p == 27) { - if (q < p) { - count16 += OutputWin(text16, q, p - q); - } - - q = p = HandleEsc(p + 1, e); - } else { - ++p; - } - } - - if (q < p) { - count16 += OutputWin(text16, q, p - q); - } - - return count16; -} -#endif - -static int write32(int fd, char32_t* text32, int len32) { -#ifdef _WIN32 - if (isatty(fd)) { - size_t len16 = 2 * len32 + 1; - unique_ptr text16(new char16_t[len16]); - size_t count16 = WinWrite32(text16.get(), text32, len32); - - return static_cast(count16); - } else { - size_t len8 = 4 * len32 + 1; - unique_ptr text8(new char[len8]); - size_t count8 = 0; - - copyString32to8(text8.get(), len8, &count8, text32, len32); - - return write(fd, text8.get(), static_cast(count8)); - } -#else - size_t len8 = 4 * len32 + 1; - unique_ptr text8(new char[len8]); - size_t count8 = 0; - - copyString32to8(text8.get(), len8, &count8, text32, len32); - - return write(fd, text8.get(), count8); -#endif -} - -class Utf32String { - public: - Utf32String() : _length(0), _data(nullptr) { - // note: parens intentional, _data must be properly initialized - _data = new char32_t[1](); - } - - explicit Utf32String(const char* src) : _length(0), _data(nullptr) { - size_t len = strlen(src); - // note: parens intentional, _data must be properly initialized - _data = new char32_t[len + 1](); - copyString8to32(_data, len + 1, _length, src); - } - - explicit Utf32String(const char8_t* src) : _length(0), _data(nullptr) { - size_t len = strlen(reinterpret_cast(src)); - // note: parens intentional, _data must be properly initialized - _data = new char32_t[len + 1](); - copyString8to32(_data, len + 1, _length, src); - } - - explicit Utf32String(const char32_t* src) : _length(0), _data(nullptr) { - for (_length = 0; src[_length] != 0; ++_length) { - } - - // note: parens intentional, _data must be properly initialized - _data = new char32_t[_length + 1](); - memcpy(_data, src, _length * sizeof(char32_t)); - } - - explicit Utf32String(const char32_t* src, int len) : _length(len), _data(nullptr) { - // note: parens intentional, _data must be properly initialized - _data = new char32_t[len + 1](); - memcpy(_data, src, len * sizeof(char32_t)); - } - - explicit Utf32String(int len) : _length(0), _data(nullptr) { - // note: parens intentional, _data must be properly initialized - _data = new char32_t[len](); - } - - explicit Utf32String(const Utf32String& that) : _length(that._length), _data(nullptr) { - // note: parens intentional, _data must be properly initialized - _data = new char32_t[_length + 1](); - memcpy(_data, that._data, sizeof(char32_t) * _length); - } - - Utf32String& operator=(const Utf32String& that) { - if (this != &that) { - delete[] _data; - _data = new char32_t[that._length](); - _length = that._length; - memcpy(_data, that._data, sizeof(char32_t) * _length); - } - - return *this; - } - - ~Utf32String() { delete[] _data; } - - public: - char32_t* get() const { return _data; } - - size_t length() const { return _length; } - - size_t chars() const { return _length; } - - void initFromBuffer() { - for (_length = 0; _data[_length] != 0; ++_length) { - } - } - - const char32_t& operator[](size_t pos) const { return _data[pos]; } - - char32_t& 
operator[](size_t pos) { return _data[pos]; } - - private: - size_t _length; - char32_t* _data; -}; - -class Utf8String { - Utf8String(const Utf8String&) = delete; - Utf8String& operator=(const Utf8String&) = delete; - - public: - explicit Utf8String(const Utf32String& src) { - size_t len = src.length() * 4 + 1; - _data = new char[len]; - copyString32to8(_data, len, src.get()); - } - - ~Utf8String() { delete[] _data; } - - public: - char* get() const { return _data; } - - private: - char* _data; -}; - -struct linenoiseCompletions { - vector completionStrings; -}; - -#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 -#define LINENOISE_MAX_LINE 4096 - -// make control-characters more readable -#define ctrlChar(upperCaseASCII) (upperCaseASCII - 0x40) - -/** - * Recompute widths of all characters in a char32_t buffer - * @param text input buffer of Unicode characters - * @param widths output buffer of character widths - * @param charCount number of characters in buffer - */ -namespace linenoise_ng { -int mk_wcwidth(char32_t ucs); -} - -static void recomputeCharacterWidths(const char32_t* text, char* widths, - int charCount) { - for (int i = 0; i < charCount; ++i) { - widths[i] = mk_wcwidth(text[i]); - } -} - -/** - * Calculate a new screen position given a starting position, screen width and - * character count - * @param x initial x position (zero-based) - * @param y initial y position (zero-based) - * @param screenColumns screen column count - * @param charCount character positions to advance - * @param xOut returned x position (zero-based) - * @param yOut returned y position (zero-based) - */ -static void calculateScreenPosition(int x, int y, int screenColumns, - int charCount, int& xOut, int& yOut) { - xOut = x; - yOut = y; - int charsRemaining = charCount; - while (charsRemaining > 0) { - int charsThisRow = (x + charsRemaining < screenColumns) ? 
charsRemaining - : screenColumns - x; - xOut = x + charsThisRow; - yOut = y; - charsRemaining -= charsThisRow; - x = 0; - ++y; - } - if (xOut == screenColumns) { // we have to special-case line wrap - xOut = 0; - ++yOut; - } -} - -/** - * Calculate a column width using mk_wcswidth() - * @param buf32 text to calculate - * @param len length of text to calculate - */ -namespace linenoise_ng { -int mk_wcswidth(const char32_t* pwcs, size_t n); -} - -static int calculateColumnPosition(char32_t* buf32, int len) { - int width = mk_wcswidth(reinterpret_cast(buf32), len); - if (width == -1) - return len; - else - return width; -} - -static bool isControlChar(char32_t testChar) { - return (testChar < ' ') || // C0 controls - (testChar >= 0x7F && testChar <= 0x9F); // DEL and C1 controls -} - -struct PromptBase { // a convenience struct for grouping prompt info - Utf32String promptText; // our copy of the prompt text, edited - char* promptCharWidths; // character widths from mk_wcwidth() - int promptChars; // chars in promptText - int promptBytes; // bytes in promptText - int promptExtraLines; // extra lines (beyond 1) occupied by prompt - int promptIndentation; // column offset to end of prompt - int promptLastLinePosition; // index into promptText where last line begins - int promptPreviousInputLen; // promptChars of previous input line, for - // clearing - int promptCursorRowOffset; // where the cursor is relative to the start of - // the prompt - int promptScreenColumns; // width of screen in columns - int promptPreviousLen; // help erasing - int promptErrorCode; // error code (invalid UTF-8) or zero - - PromptBase() : promptPreviousInputLen(0) {} - - bool write() { - if (write32(1, promptText.get(), promptBytes) == -1) return false; - - return true; - } -}; - -struct PromptInfo : public PromptBase { - PromptInfo(const char* textPtr, int columns) { - promptExtraLines = 0; - promptLastLinePosition = 0; - promptPreviousLen = 0; - promptScreenColumns = columns; - Utf32String tempUnicode(textPtr); - - // strip control characters from the prompt -- we do allow newline - char32_t* pIn = tempUnicode.get(); - char32_t* pOut = pIn; - - int len = 0; - int x = 0; - - bool const strip = (isatty(1) == 0); - - while (*pIn) { - char32_t c = *pIn; - if ('\n' == c || !isControlChar(c)) { - *pOut = c; - ++pOut; - ++pIn; - ++len; - if ('\n' == c || ++x >= promptScreenColumns) { - x = 0; - ++promptExtraLines; - promptLastLinePosition = len; - } - } else if (c == '\x1b') { - if (strip) { - // jump over control chars - ++pIn; - if (*pIn == '[') { - ++pIn; - while (*pIn && ((*pIn == ';') || ((*pIn >= '0' && *pIn <= '9')))) { - ++pIn; - } - if (*pIn == 'm') { - ++pIn; - } - } - } else { - // copy control chars - *pOut = *pIn; - ++pOut; - ++pIn; - if (*pIn == '[') { - *pOut = *pIn; - ++pOut; - ++pIn; - while (*pIn && ((*pIn == ';') || ((*pIn >= '0' && *pIn <= '9')))) { - *pOut = *pIn; - ++pOut; - ++pIn; - } - if (*pIn == 'm') { - *pOut = *pIn; - ++pOut; - ++pIn; - } - } - } - } else { - ++pIn; - } - } - *pOut = 0; - promptChars = len; - promptBytes = static_cast(pOut - tempUnicode.get()); - promptText = tempUnicode; - - promptIndentation = len - promptLastLinePosition; - promptCursorRowOffset = promptExtraLines; - } -}; - -// Used with DynamicPrompt (history search) -// -static const Utf32String forwardSearchBasePrompt("(i-search)`"); -static const Utf32String reverseSearchBasePrompt("(reverse-i-search)`"); -static const Utf32String endSearchBasePrompt("': "); -static Utf32String - previousSearchText; // remembered across 
invocations of linenoise() - -// changing prompt for "(reverse-i-search)`text':" etc. -// -struct DynamicPrompt : public PromptBase { - Utf32String searchText; // text we are searching for - char* searchCharWidths; // character widths from mk_wcwidth() - int searchTextLen; // chars in searchText - int direction; // current search direction, 1=forward, -1=reverse - - DynamicPrompt(PromptBase& pi, int initialDirection) - : searchTextLen(0), direction(initialDirection) { - promptScreenColumns = pi.promptScreenColumns; - promptCursorRowOffset = 0; - Utf32String emptyString(1); - searchText = emptyString; - const Utf32String* basePrompt = - (direction > 0) ? &forwardSearchBasePrompt : &reverseSearchBasePrompt; - size_t promptStartLength = basePrompt->length(); - promptChars = - static_cast(promptStartLength + endSearchBasePrompt.length()); - promptBytes = promptChars; - promptLastLinePosition = promptChars; // TODO fix this, we are asssuming - // that the history prompt won't wrap - // (!) - promptPreviousLen = promptChars; - Utf32String tempUnicode(promptChars + 1); - memcpy(tempUnicode.get(), basePrompt->get(), - sizeof(char32_t) * promptStartLength); - memcpy(&tempUnicode[promptStartLength], endSearchBasePrompt.get(), - sizeof(char32_t) * (endSearchBasePrompt.length() + 1)); - tempUnicode.initFromBuffer(); - promptText = tempUnicode; - calculateScreenPosition(0, 0, pi.promptScreenColumns, promptChars, - promptIndentation, promptExtraLines); - } - - void updateSearchPrompt(void) { - const Utf32String* basePrompt = - (direction > 0) ? &forwardSearchBasePrompt : &reverseSearchBasePrompt; - size_t promptStartLength = basePrompt->length(); - promptChars = static_cast(promptStartLength + searchTextLen + - endSearchBasePrompt.length()); - promptBytes = promptChars; - Utf32String tempUnicode(promptChars + 1); - memcpy(tempUnicode.get(), basePrompt->get(), - sizeof(char32_t) * promptStartLength); - memcpy(&tempUnicode[promptStartLength], searchText.get(), - sizeof(char32_t) * searchTextLen); - size_t endIndex = promptStartLength + searchTextLen; - memcpy(&tempUnicode[endIndex], endSearchBasePrompt.get(), - sizeof(char32_t) * (endSearchBasePrompt.length() + 1)); - tempUnicode.initFromBuffer(); - promptText = tempUnicode; - } - - void updateSearchText(const char32_t* textPtr) { - Utf32String tempUnicode(textPtr); - searchTextLen = static_cast(tempUnicode.chars()); - searchText = tempUnicode; - updateSearchPrompt(); - } -}; - -class KillRing { - static const int capacity = 10; - int size; - int index; - char indexToSlot[10]; - vector theRing; - - public: - enum action { actionOther, actionKill, actionYank }; - action lastAction; - size_t lastYankSize; - - KillRing() : size(0), index(0), lastAction(actionOther) { - theRing.reserve(capacity); - } - - void kill(const char32_t* text, int textLen, bool forward) { - if (textLen == 0) { - return; - } - Utf32String killedText(text, textLen); - if (lastAction == actionKill && size > 0) { - int slot = indexToSlot[0]; - int currentLen = static_cast(theRing[slot].length()); - int resultLen = currentLen + textLen; - Utf32String temp(resultLen + 1); - if (forward) { - memcpy(temp.get(), theRing[slot].get(), currentLen * sizeof(char32_t)); - memcpy(&temp[currentLen], killedText.get(), textLen * sizeof(char32_t)); - } else { - memcpy(temp.get(), killedText.get(), textLen * sizeof(char32_t)); - memcpy(&temp[textLen], theRing[slot].get(), - currentLen * sizeof(char32_t)); - } - temp[resultLen] = 0; - temp.initFromBuffer(); - theRing[slot] = temp; - } else { - if (size < 
capacity) { - if (size > 0) { - memmove(&indexToSlot[1], &indexToSlot[0], size); - } - indexToSlot[0] = size; - size++; - theRing.push_back(killedText); - } else { - int slot = indexToSlot[capacity - 1]; - theRing[slot] = killedText; - memmove(&indexToSlot[1], &indexToSlot[0], capacity - 1); - indexToSlot[0] = slot; - } - index = 0; - } - } - - Utf32String* yank() { return (size > 0) ? &theRing[indexToSlot[index]] : 0; } - - Utf32String* yankPop() { - if (size == 0) { - return 0; - } - ++index; - if (index == size) { - index = 0; - } - return &theRing[indexToSlot[index]]; - } -}; - -class InputBuffer { - char32_t* buf32; // input buffer - char* charWidths; // character widths from mk_wcwidth() - int buflen; // buffer size in characters - int len; // length of text in input buffer - int pos; // character position in buffer ( 0 <= pos <= len ) - - void clearScreen(PromptBase& pi); - int incrementalHistorySearch(PromptBase& pi, int startChar); - int completeLine(PromptBase& pi); - void refreshLine(PromptBase& pi); - - public: - InputBuffer(char32_t* buffer, char* widthArray, int bufferLen) - : buf32(buffer), - charWidths(widthArray), - buflen(bufferLen - 1), - len(0), - pos(0) { - buf32[0] = 0; - } - void preloadBuffer(const char* preloadText) { - size_t ucharCount = 0; - copyString8to32(buf32, buflen + 1, ucharCount, preloadText); - recomputeCharacterWidths(buf32, charWidths, static_cast(ucharCount)); - len = static_cast(ucharCount); - pos = static_cast(ucharCount); - } - int getInputLine(PromptBase& pi); - int length(void) const { return len; } -}; - -// Special codes for keyboard input: -// -// Between Windows and the various Linux "terminal" programs, there is some -// pretty diverse behavior in the "scan codes" and escape sequences we are -// presented with. So ... we'll translate them all into our own pidgin -// pseudocode, trying to stay out of the way of UTF-8 and international -// characters. Here's the general plan. -// -// "User input keystrokes" (key chords, whatever) will be encoded as a single -// value. -// The low 21 bits are reserved for Unicode characters. Popular function-type -// keys -// get their own codes in the range 0x10200000 to (if needed) 0x1FE00000, -// currently -// just arrow keys, Home, End and Delete. Keypresses with Ctrl get ORed with -// 0x20000000, with Alt get ORed with 0x40000000. So, Ctrl+Alt+Home is encoded -// as 0x20000000 + 0x40000000 + 0x10A00000 == 0x70A00000. To keep things -// complicated, -// the Alt key is equivalent to prefixing the keystroke with ESC, so ESC -// followed by -// D is treated the same as Alt + D ... we'll just use Emacs terminology and -// call -// this "Meta". So, we will encode both ESC followed by D and Alt held down -// while D -// is pressed the same, as Meta-D, encoded as 0x40000064. 
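
The keystroke-encoding scheme described in the comment above can be sanity-checked with a small standalone sketch. The constant values (META, CTRL, HOME_KEY) are illustrative copies of the definitions that follow in the removed file, and the worked values (0x70A00000 for Ctrl+Alt+Home, 0x40000064 for Meta-d) come straight from the comment:

    #include <cassert>
    #include <cstdint>

    // Illustrative copies of the constants defined further down in the
    // removed linenoise.cpp.
    static const uint32_t META     = 0x40000000;  // Alt / ESC-prefix modifier bit
    static const uint32_t CTRL     = 0x20000000;  // Ctrl modifier bit
    static const uint32_t HOME_KEY = 0x10A00000;  // one of the special-key codes

    int main() {
        // Ctrl+Alt+Home: OR the modifier bits onto the special-key code,
        // matching the worked example in the comment (0x70A00000).
        assert((CTRL | META | HOME_KEY) == 0x70A00000);

        // Meta-d: a plain Unicode character lives in the low 21 bits, so the
        // modifier bits never collide with it (0x40000064 in the comment).
        uint32_t metaD = META | static_cast<uint32_t>('d');
        assert(metaD == 0x40000064);
        assert((metaD & 0x001FFFFF) == 'd');
        return 0;
    }
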
-// -// Here are the definitions of our component constants: -// -// Maximum unsigned 32-bit value = 0xFFFFFFFF; // For reference, max 32-bit -// value -// Highest allocated Unicode char = 0x001FFFFF; // For reference, max -// Unicode value -static const int META = 0x40000000; // Meta key combination -static const int CTRL = 0x20000000; // Ctrl key combination -// static const int SPECIAL_KEY = 0x10000000; // Common bit for all special -// keys -static const int UP_ARROW_KEY = 0x10200000; // Special keys -static const int DOWN_ARROW_KEY = 0x10400000; -static const int RIGHT_ARROW_KEY = 0x10600000; -static const int LEFT_ARROW_KEY = 0x10800000; -static const int HOME_KEY = 0x10A00000; -static const int END_KEY = 0x10C00000; -static const int DELETE_KEY = 0x10E00000; -static const int PAGE_UP_KEY = 0x11000000; -static const int PAGE_DOWN_KEY = 0x11200000; - -static const char* unsupported_term[] = {"dumb", "cons25", "emacs", NULL}; -static linenoiseCompletionCallback* completionCallback = NULL; - -#ifdef _WIN32 -static HANDLE console_in, console_out; -static DWORD oldMode; -static WORD oldDisplayAttribute; -#else -static struct termios orig_termios; /* in order to restore at exit */ -#endif - -static KillRing killRing; - -static int rawmode = 0; /* for atexit() function to check if restore is needed*/ -static int atexit_registered = 0; /* register atexit just 1 time */ -static int historyMaxLen = LINENOISE_DEFAULT_HISTORY_MAX_LEN; -static int historyLen = 0; -static int historyIndex = 0; -static char8_t** history = NULL; - -// used to emulate Windows command prompt on down-arrow after a recall -// we use -2 as our "not set" value because we add 1 to the previous index on -// down-arrow, -// and zero is a valid index (so -1 is a valid "previous index") -static int historyPreviousIndex = -2; -static bool historyRecallMostRecent = false; - -static void linenoiseAtExit(void); - -static bool isUnsupportedTerm(void) { - char* term = getenv("TERM"); - if (term == NULL) return false; - for (int j = 0; unsupported_term[j]; ++j) - if (!strcasecmp(term, unsupported_term[j])) { - return true; - } - return false; -} - -static void beep() { - fprintf(stderr, "\x7"); // ctrl-G == bell/beep - fflush(stderr); -} - -void linenoiseHistoryFree(void) { - if (history) { - for (int j = 0; j < historyLen; ++j) free(history[j]); - historyLen = 0; - free(history); - history = 0; - } -} - -static int enableRawMode(void) { -#ifdef _WIN32 - if (!console_in) { - console_in = GetStdHandle(STD_INPUT_HANDLE); - console_out = GetStdHandle(STD_OUTPUT_HANDLE); - - GetConsoleMode(console_in, &oldMode); - SetConsoleMode(console_in, oldMode & - ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | - ENABLE_PROCESSED_INPUT)); - } - return 0; -#else - struct termios raw; - - if (!isatty(STDIN_FILENO)) goto fatal; - if (!atexit_registered) { - atexit(linenoiseAtExit); - atexit_registered = 1; - } - if (tcgetattr(0, &orig_termios) == -1) goto fatal; - - raw = orig_termios; /* modify the original mode */ - /* input modes: no break, no CR to NL, no parity check, no strip char, - * no start/stop output control. 
*/ - raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); - /* output modes - disable post processing */ - // this is wrong, we don't want raw output, it turns newlines into straight - // linefeeds - // raw.c_oflag &= ~(OPOST); - /* control modes - set 8 bit chars */ - raw.c_cflag |= (CS8); - /* local modes - echoing off, canonical off, no extended functions, - * no signal chars (^Z,^C) */ - raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); - /* control chars - set return condition: min number of bytes and timer. - * We want read to return every single byte, without timeout. */ - raw.c_cc[VMIN] = 1; - raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ - - /* put terminal in raw mode after flushing */ - if (tcsetattr(0, TCSADRAIN, &raw) < 0) goto fatal; - rawmode = 1; - return 0; - -fatal: - errno = ENOTTY; - return -1; -#endif -} - -static void disableRawMode(void) { -#ifdef _WIN32 - SetConsoleMode(console_in, oldMode); - console_in = 0; - console_out = 0; -#else - if (rawmode && tcsetattr(0, TCSADRAIN, &orig_termios) != -1) rawmode = 0; -#endif -} - -// At exit we'll try to fix the terminal to the initial conditions -static void linenoiseAtExit(void) { disableRawMode(); } - -static int getScreenColumns(void) { - int cols; -#ifdef _WIN32 - CONSOLE_SCREEN_BUFFER_INFO inf; - GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &inf); - cols = inf.dwSize.X; -#else - struct winsize ws; - cols = (ioctl(1, TIOCGWINSZ, &ws) == -1) ? 80 : ws.ws_col; -#endif - // cols is 0 in certain circumstances like inside debugger, which creates - // further issues - return (cols > 0) ? cols : 80; -} - -static int getScreenRows(void) { - int rows; -#ifdef _WIN32 - CONSOLE_SCREEN_BUFFER_INFO inf; - GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &inf); - rows = 1 + inf.srWindow.Bottom - inf.srWindow.Top; -#else - struct winsize ws; - rows = (ioctl(1, TIOCGWINSZ, &ws) == -1) ? 24 : ws.ws_row; -#endif - return (rows > 0) ? rows : 24; -} - -static void setDisplayAttribute(bool enhancedDisplay, bool error) { -#ifdef _WIN32 - if (enhancedDisplay) { - CONSOLE_SCREEN_BUFFER_INFO inf; - GetConsoleScreenBufferInfo(console_out, &inf); - oldDisplayAttribute = inf.wAttributes; - BYTE oldLowByte = oldDisplayAttribute & 0xFF; - BYTE newLowByte; - switch (oldLowByte) { - case 0x07: - // newLowByte = FOREGROUND_BLUE | FOREGROUND_INTENSITY; // too dim - // newLowByte = FOREGROUND_BLUE; // even dimmer - newLowByte = FOREGROUND_BLUE | - FOREGROUND_GREEN; // most similar to xterm appearance - break; - case 0x70: - newLowByte = BACKGROUND_BLUE | BACKGROUND_INTENSITY; - break; - default: - newLowByte = oldLowByte ^ 0xFF; // default to inverse video - break; - } - inf.wAttributes = (inf.wAttributes & 0xFF00) | newLowByte; - SetConsoleTextAttribute(console_out, inf.wAttributes); - } else { - SetConsoleTextAttribute(console_out, oldDisplayAttribute); - } -#else - if (enhancedDisplay) { - char const* p = (error ? "\x1b[1;31m" : "\x1b[1;34m"); - if (write(1, p, 7) == -1) - return; /* bright blue (visible with both B&W bg) */ - } else { - if (write(1, "\x1b[0m", 4) == -1) return; /* reset */ - } -#endif -} - -/** - * Display the dynamic incremental search prompt and the current user input - * line. 
- * @param pi PromptBase struct holding information about the prompt and our - * screen position - * @param buf32 input buffer to be displayed - * @param len count of characters in the buffer - * @param pos current cursor position within the buffer (0 <= pos <= len) - */ -static void dynamicRefresh(PromptBase& pi, char32_t* buf32, int len, int pos) { - // calculate the position of the end of the prompt - int xEndOfPrompt, yEndOfPrompt; - calculateScreenPosition(0, 0, pi.promptScreenColumns, pi.promptChars, - xEndOfPrompt, yEndOfPrompt); - pi.promptIndentation = xEndOfPrompt; - - // calculate the position of the end of the input line - int xEndOfInput, yEndOfInput; - calculateScreenPosition(xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, - calculateColumnPosition(buf32, len), xEndOfInput, - yEndOfInput); - - // calculate the desired position of the cursor - int xCursorPos, yCursorPos; - calculateScreenPosition(xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, - calculateColumnPosition(buf32, pos), xCursorPos, - yCursorPos); - -#ifdef _WIN32 - // position at the start of the prompt, clear to end of previous input - CONSOLE_SCREEN_BUFFER_INFO inf; - GetConsoleScreenBufferInfo(console_out, &inf); - inf.dwCursorPosition.X = 0; - inf.dwCursorPosition.Y -= pi.promptCursorRowOffset /*- pi.promptExtraLines*/; - SetConsoleCursorPosition(console_out, inf.dwCursorPosition); - DWORD count; - FillConsoleOutputCharacterA(console_out, ' ', - pi.promptPreviousLen + pi.promptPreviousInputLen, - inf.dwCursorPosition, &count); - pi.promptPreviousLen = pi.promptIndentation; - pi.promptPreviousInputLen = len; - - // display the prompt - if (!pi.write()) return; - - // display the input line - if (write32(1, buf32, len) == -1) return; - - // position the cursor - GetConsoleScreenBufferInfo(console_out, &inf); - inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32 - inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos; - SetConsoleCursorPosition(console_out, inf.dwCursorPosition); -#else // _WIN32 - char seq[64]; - int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines; - if (cursorRowMovement > 0) { // move the cursor up as required - snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); - if (write(1, seq, strlen(seq)) == -1) return; - } - // position at the start of the prompt, clear to end of screen - snprintf(seq, sizeof seq, "\x1b[1G\x1b[J"); // 1-based on VT100 - if (write(1, seq, strlen(seq)) == -1) return; - - // display the prompt - if (!pi.write()) return; - - // display the input line - if (write32(1, buf32, len) == -1) return; - - // we have to generate our own newline on line wrap - if (xEndOfInput == 0 && yEndOfInput > 0) - if (write(1, "\n", 1) == -1) return; - - // position the cursor - cursorRowMovement = yEndOfInput - yCursorPos; - if (cursorRowMovement > 0) { // move the cursor up as required - snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); - if (write(1, seq, strlen(seq)) == -1) return; - } - // position the cursor within the line - snprintf(seq, sizeof seq, "\x1b[%dG", xCursorPos + 1); // 1-based on VT100 - if (write(1, seq, strlen(seq)) == -1) return; -#endif - - pi.promptCursorRowOffset = - pi.promptExtraLines + yCursorPos; // remember row for next pass -} - -/** - * Refresh the user's input line: the prompt is already onscreen and is not - * redrawn here - * @param pi PromptBase struct holding information about the prompt and our - * screen position - */ -void InputBuffer::refreshLine(PromptBase& pi) { - // check for a matching brace/bracket/paren, 
remember its position if found - int highlight = -1; - bool indicateError = false; - if (pos < len) { - /* this scans for a brace matching buf32[pos] to highlight */ - unsigned char part1, part2; - int scanDirection = 0; - if (strchr("}])", buf32[pos])) { - scanDirection = -1; /* backwards */ - if (buf32[pos] == '}') { - part1 = '}'; part2 = '{'; - } else if (buf32[pos] == ']') { - part1 = ']'; part2 = '['; - } else { - part1 = ')'; part2 = '('; - } - } - else if (strchr("{[(", buf32[pos])) { - scanDirection = 1; /* forwards */ - if (buf32[pos] == '{') { - //part1 = '{'; part2 = '}'; - part1 = '}'; part2 = '{'; - } else if (buf32[pos] == '[') { - //part1 = '['; part2 = ']'; - part1 = ']'; part2 = '['; - } else { - //part1 = '('; part2 = ')'; - part1 = ')'; part2 = '('; - } - } - - if (scanDirection) { - int unmatched = scanDirection; - int unmatchedOther = 0; - for (int i = pos + scanDirection; i >= 0 && i < len; i += scanDirection) { - /* TODO: the right thing when inside a string */ - if (strchr("}])", buf32[i])) { - if (buf32[i] == part1) { - --unmatched; - } else { - --unmatchedOther; - } - } else if (strchr("{[(", buf32[i])) { - if (buf32[i] == part2) { - ++unmatched; - } else { - ++unmatchedOther; - } - } -/* - if (strchr("}])", buf32[i])) - --unmatched; - else if (strchr("{[(", buf32[i])) - ++unmatched; -*/ - if (unmatched == 0) { - highlight = i; - indicateError = (unmatchedOther != 0); - break; - } - } - } - } - - // calculate the position of the end of the input line - int xEndOfInput, yEndOfInput; - calculateScreenPosition(pi.promptIndentation, 0, pi.promptScreenColumns, - calculateColumnPosition(buf32, len), xEndOfInput, - yEndOfInput); - - // calculate the desired position of the cursor - int xCursorPos, yCursorPos; - calculateScreenPosition(pi.promptIndentation, 0, pi.promptScreenColumns, - calculateColumnPosition(buf32, pos), xCursorPos, - yCursorPos); - -#ifdef _WIN32 - // position at the end of the prompt, clear to end of previous input - CONSOLE_SCREEN_BUFFER_INFO inf; - GetConsoleScreenBufferInfo(console_out, &inf); - inf.dwCursorPosition.X = pi.promptIndentation; // 0-based on Win32 - inf.dwCursorPosition.Y -= pi.promptCursorRowOffset - pi.promptExtraLines; - SetConsoleCursorPosition(console_out, inf.dwCursorPosition); - DWORD count; - if (len < pi.promptPreviousInputLen) - FillConsoleOutputCharacterA(console_out, ' ', pi.promptPreviousInputLen, - inf.dwCursorPosition, &count); - pi.promptPreviousInputLen = len; - - // display the input line - if (highlight == -1) { - if (write32(1, buf32, len) == -1) return; - } else { - if (write32(1, buf32, highlight) == -1) return; - setDisplayAttribute(true, indicateError); /* bright blue (visible with both B&W bg) */ - if (write32(1, &buf32[highlight], 1) == -1) return; - setDisplayAttribute(false, indicateError); - if (write32(1, buf32 + highlight + 1, len - highlight - 1) == -1) return; - } - - // position the cursor - GetConsoleScreenBufferInfo(console_out, &inf); - inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32 - inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos; - SetConsoleCursorPosition(console_out, inf.dwCursorPosition); -#else // _WIN32 - char seq[64]; - int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines; - if (cursorRowMovement > 0) { // move the cursor up as required - snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); - if (write(1, seq, strlen(seq)) == -1) return; - } - // position at the end of the prompt, clear to end of screen - snprintf(seq, sizeof seq, "\x1b[%dG\x1b[J", - 
pi.promptIndentation + 1); // 1-based on VT100 - if (write(1, seq, strlen(seq)) == -1) return; - - if (highlight == -1) { // write unhighlighted text - if (write32(1, buf32, len) == -1) return; - } else { // highlight the matching brace/bracket/parenthesis - if (write32(1, buf32, highlight) == -1) return; - setDisplayAttribute(true, indicateError); - if (write32(1, &buf32[highlight], 1) == -1) return; - setDisplayAttribute(false, indicateError); - if (write32(1, buf32 + highlight + 1, len - highlight - 1) == -1) return; - } - - // we have to generate our own newline on line wrap - if (xEndOfInput == 0 && yEndOfInput > 0) - if (write(1, "\n", 1) == -1) return; - - // position the cursor - cursorRowMovement = yEndOfInput - yCursorPos; - if (cursorRowMovement > 0) { // move the cursor up as required - snprintf(seq, sizeof seq, "\x1b[%dA", cursorRowMovement); - if (write(1, seq, strlen(seq)) == -1) return; - } - // position the cursor within the line - snprintf(seq, sizeof seq, "\x1b[%dG", xCursorPos + 1); // 1-based on VT100 - if (write(1, seq, strlen(seq)) == -1) return; -#endif - - pi.promptCursorRowOffset = - pi.promptExtraLines + yCursorPos; // remember row for next pass -} - -#ifndef _WIN32 - -/** - * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode - * (char32_t) character it - * encodes - * - * @return char32_t Unicode character - */ -static char32_t readUnicodeCharacter(void) { - static char8_t utf8String[5]; - static size_t utf8Count = 0; - while (true) { - char8_t c; - - /* Continue reading if interrupted by signal. */ - ssize_t nread; - do { - nread = read(0, &c, 1); - } while ((nread == -1) && (errno == EINTR)); - - if (nread <= 0) return 0; - if (c <= 0x7F) { // short circuit ASCII - utf8Count = 0; - return c; - } else if (utf8Count < sizeof(utf8String) - 1) { - utf8String[utf8Count++] = c; - utf8String[utf8Count] = 0; - char32_t unicodeChar[2]; - size_t ucharCount; - ConversionResult res = - copyString8to32(unicodeChar, 2, ucharCount, utf8String); - if (res == conversionOK && ucharCount) { - utf8Count = 0; - return unicodeChar[0]; - } - } else { - utf8Count = - 0; // this shouldn't happen: got four bytes but no UTF-8 character - } - } -} - -namespace EscapeSequenceProcessing { // move these out of global namespace - -// This chunk of code does parsing of the escape sequences sent by various Linux -// terminals. -// -// It handles arrow keys, Home, End and Delete keys by interpreting the -// sequences sent by -// gnome terminal, xterm, rxvt, konsole, aterm and yakuake including the Alt and -// Ctrl key -// combinations that are understood by linenoise. -// -// The parsing uses tables, a bunch of intermediate dispatch routines and a -// doDispatch -// loop that reads the tables and sends control to "deeper" routines to continue -// the -// parsing. The starting call to doDispatch( c, initialDispatch ) will -// eventually return -// either a character (with optional CTRL and META bits set), or -1 if parsing -// fails, or -// zero if an attempt to read from the keyboard fails. -// -// This is rather sloppy escape sequence processing, since we're not paying -// attention to what the -// actual TERM is set to and are processing all key sequences for all terminals, -// but it works with -// the most common keystrokes on the most common terminals. It's intricate, but -// the nested 'if' -// statements required to do it directly would be worse. This way has the -// advantage of allowing -// changes and extensions without having to touch a lot of code. 
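
As a rough illustration of the table-driven layout that comment describes (a sketch only; the removed code's actual tables and dispatch routines follow below in this diff), each table pairs a character list with a routine list that is one entry longer, the extra routine being the fallback when nothing matches:

    #include <cstdio>

    // Minimal sketch of the dispatch-table pattern; the identifiers here are
    // illustrative, not the removed code's own names.
    typedef int (*DispatchFn)(char);

    struct Dispatch {
        unsigned len;          // number of characters in chars
        const char* chars;     // characters to test against
        DispatchFn* routines;  // len + 1 routines; the last is the fallback
    };

    static int onA(char)    { return 1; }   // e.g. "up arrow"
    static int onB(char)    { return 2; }   // e.g. "down arrow"
    static int onFail(char) { return -1; }  // parse failure

    static int dispatch(char c, const Dispatch& table) {
        for (unsigned i = 0; i < table.len; ++i)
            if (table.chars[i] == c) return table.routines[i](c);
        return table.routines[table.len](c);  // no match: call the fallback
    }

    int main() {
        DispatchFn routines[] = { onA, onB, onFail };
        Dispatch table = { 2, "AB", routines };
        std::printf("%d %d %d\n", dispatch('A', table), dispatch('B', table),
                    dispatch('X', table));  // prints: 1 2 -1
        return 0;
    }
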
- -// This is a typedef for the routine called by doDispatch(). It takes the -// current character -// as input, does any required processing including reading more characters and -// calling other -// dispatch routines, then eventually returns the final (possibly extended or -// special) character. -// -typedef char32_t (*CharacterDispatchRoutine)(char32_t); - -// This structure is used by doDispatch() to hold a list of characters to test -// for and -// a list of routines to call if the character matches. The dispatch routine -// list is one -// longer than the character list; the final entry is used if no character -// matches. -// -struct CharacterDispatch { - unsigned int len; // length of the chars list - const char* chars; // chars to test - CharacterDispatchRoutine* dispatch; // array of routines to call -}; - -// This dispatch routine is given a dispatch table and then farms work out to -// routines -// listed in the table based on the character it is called with. The dispatch -// routines can -// read more input characters to decide what should eventually be returned. -// Eventually, -// a called routine returns either a character or -1 to indicate parsing -// failure. -// -static char32_t doDispatch(char32_t c, CharacterDispatch& dispatchTable) { - for (unsigned int i = 0; i < dispatchTable.len; ++i) { - if (static_cast(dispatchTable.chars[i]) == c) { - return dispatchTable.dispatch[i](c); - } - } - return dispatchTable.dispatch[dispatchTable.len](c); -} - -static char32_t thisKeyMetaCtrl = - 0; // holds pre-set Meta and/or Ctrl modifiers - -// Final dispatch routines -- return something -// -static char32_t normalKeyRoutine(char32_t c) { return thisKeyMetaCtrl | c; } -static char32_t upArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | UP_ARROW_KEY; -} -static char32_t downArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | DOWN_ARROW_KEY; -} -static char32_t rightArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | RIGHT_ARROW_KEY; -} -static char32_t leftArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | LEFT_ARROW_KEY; -} -static char32_t homeKeyRoutine(char32_t) { return thisKeyMetaCtrl | HOME_KEY; } -static char32_t endKeyRoutine(char32_t) { return thisKeyMetaCtrl | END_KEY; } -static char32_t pageUpKeyRoutine(char32_t) { - return thisKeyMetaCtrl | PAGE_UP_KEY; -} -static char32_t pageDownKeyRoutine(char32_t) { - return thisKeyMetaCtrl | PAGE_DOWN_KEY; -} -static char32_t deleteCharRoutine(char32_t) { - return thisKeyMetaCtrl | ctrlChar('H'); -} // key labeled Backspace -static char32_t deleteKeyRoutine(char32_t) { - return thisKeyMetaCtrl | DELETE_KEY; -} // key labeled Delete -static char32_t ctrlUpArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | CTRL | UP_ARROW_KEY; -} -static char32_t ctrlDownArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | CTRL | DOWN_ARROW_KEY; -} -static char32_t ctrlRightArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | CTRL | RIGHT_ARROW_KEY; -} -static char32_t ctrlLeftArrowKeyRoutine(char32_t) { - return thisKeyMetaCtrl | CTRL | LEFT_ARROW_KEY; -} -static char32_t escFailureRoutine(char32_t) { - beep(); - return -1; -} - -// Handle ESC [ 1 ; 3 (or 5) escape sequences -// -static CharacterDispatchRoutine escLeftBracket1Semicolon3or5Routines[] = { - upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, - leftArrowKeyRoutine, escFailureRoutine}; -static CharacterDispatch escLeftBracket1Semicolon3or5Dispatch = { - 4, "ABCD", escLeftBracket1Semicolon3or5Routines}; - -// Handle ESC [ 1 ; escape sequences -// 
-static char32_t escLeftBracket1Semicolon3Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - thisKeyMetaCtrl |= META; - return doDispatch(c, escLeftBracket1Semicolon3or5Dispatch); -} -static char32_t escLeftBracket1Semicolon5Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - thisKeyMetaCtrl |= CTRL; - return doDispatch(c, escLeftBracket1Semicolon3or5Dispatch); -} -static CharacterDispatchRoutine escLeftBracket1SemicolonRoutines[] = { - escLeftBracket1Semicolon3Routine, escLeftBracket1Semicolon5Routine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket1SemicolonDispatch = { - 2, "35", escLeftBracket1SemicolonRoutines}; - -// Handle ESC [ 1 escape sequences -// -static char32_t escLeftBracket1SemicolonRoutine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket1SemicolonDispatch); -} -static CharacterDispatchRoutine escLeftBracket1Routines[] = { - homeKeyRoutine, escLeftBracket1SemicolonRoutine, escFailureRoutine}; -static CharacterDispatch escLeftBracket1Dispatch = {2, "~;", - escLeftBracket1Routines}; - -// Handle ESC [ 3 escape sequences -// -static CharacterDispatchRoutine escLeftBracket3Routines[] = {deleteKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket3Dispatch = {1, "~", - escLeftBracket3Routines}; - -// Handle ESC [ 4 escape sequences -// -static CharacterDispatchRoutine escLeftBracket4Routines[] = {endKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket4Dispatch = {1, "~", - escLeftBracket4Routines}; - -// Handle ESC [ 5 escape sequences -// -static CharacterDispatchRoutine escLeftBracket5Routines[] = {pageUpKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket5Dispatch = {1, "~", - escLeftBracket5Routines}; - -// Handle ESC [ 6 escape sequences -// -static CharacterDispatchRoutine escLeftBracket6Routines[] = {pageDownKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket6Dispatch = {1, "~", - escLeftBracket6Routines}; - -// Handle ESC [ 7 escape sequences -// -static CharacterDispatchRoutine escLeftBracket7Routines[] = {homeKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket7Dispatch = {1, "~", - escLeftBracket7Routines}; - -// Handle ESC [ 8 escape sequences -// -static CharacterDispatchRoutine escLeftBracket8Routines[] = {endKeyRoutine, - escFailureRoutine}; -static CharacterDispatch escLeftBracket8Dispatch = {1, "~", - escLeftBracket8Routines}; - -// Handle ESC [ escape sequences -// -static char32_t escLeftBracket0Routine(char32_t c) { - return escFailureRoutine(c); -} -static char32_t escLeftBracket1Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket1Dispatch); -} -static char32_t escLeftBracket2Routine(char32_t c) { - return escFailureRoutine(c); // Insert key, unused -} -static char32_t escLeftBracket3Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket3Dispatch); -} -static char32_t escLeftBracket4Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket4Dispatch); -} -static char32_t escLeftBracket5Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket5Dispatch); -} -static char32_t escLeftBracket6Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, 
escLeftBracket6Dispatch); -} -static char32_t escLeftBracket7Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket7Dispatch); -} -static char32_t escLeftBracket8Routine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracket8Dispatch); -} -static char32_t escLeftBracket9Routine(char32_t c) { - return escFailureRoutine(c); -} - -// Handle ESC [ escape sequences -// -static CharacterDispatchRoutine escLeftBracketRoutines[] = { - upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, - leftArrowKeyRoutine, homeKeyRoutine, endKeyRoutine, - escLeftBracket0Routine, escLeftBracket1Routine, escLeftBracket2Routine, - escLeftBracket3Routine, escLeftBracket4Routine, escLeftBracket5Routine, - escLeftBracket6Routine, escLeftBracket7Routine, escLeftBracket8Routine, - escLeftBracket9Routine, escFailureRoutine}; -static CharacterDispatch escLeftBracketDispatch = {16, "ABCDHF0123456789", - escLeftBracketRoutines}; - -// Handle ESC O escape sequences -// -static CharacterDispatchRoutine escORoutines[] = { - upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, - leftArrowKeyRoutine, homeKeyRoutine, endKeyRoutine, - ctrlUpArrowKeyRoutine, ctrlDownArrowKeyRoutine, ctrlRightArrowKeyRoutine, - ctrlLeftArrowKeyRoutine, escFailureRoutine}; -static CharacterDispatch escODispatch = {10, "ABCDHFabcd", escORoutines}; - -// Initial ESC dispatch -- could be a Meta prefix or the start of an escape -// sequence -// -static char32_t escLeftBracketRoutine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escLeftBracketDispatch); -} -static char32_t escORoutine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escODispatch); -} -static char32_t setMetaRoutine(char32_t c); // need forward reference -static CharacterDispatchRoutine escRoutines[] = {escLeftBracketRoutine, - escORoutine, setMetaRoutine}; -static CharacterDispatch escDispatch = {2, "[O", escRoutines}; - -// Initial dispatch -- we are not in the middle of anything yet -// -static char32_t escRoutine(char32_t c) { - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escDispatch); -} -static CharacterDispatchRoutine initialRoutines[] = { - escRoutine, deleteCharRoutine, normalKeyRoutine}; -static CharacterDispatch initialDispatch = {2, "\x1B\x7F", initialRoutines}; - -// Special handling for the ESC key because it does double duty -// -static char32_t setMetaRoutine(char32_t c) { - thisKeyMetaCtrl = META; - if (c == 0x1B) { // another ESC, stay in ESC processing mode - c = readUnicodeCharacter(); - if (c == 0) return 0; - return doDispatch(c, escDispatch); - } - return doDispatch(c, initialDispatch); -} - -} // namespace EscapeSequenceProcessing // move these out of global namespace - -#endif // #ifndef _WIN32 - -// linenoiseReadChar -- read a keystroke or keychord from the keyboard, and -// translate it -// into an encoded "keystroke". When convenient, extended keys are translated -// into their -// simpler Emacs keystrokes, so an unmodified "left arrow" becomes Ctrl-B. -// -// A return value of zero means "no input available", and a return value of -1 -// means "invalid key". 
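
A caller that honours the return-value contract spelled out above (zero for "no input", -1 for "invalid key", otherwise an encoded keystroke) might look like the following sketch; linenoiseReadChar is stubbed here, and the CTRL/META values mirror the modifier bits defined earlier in the removed file:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t CTRL = 0x20000000;
    static const uint32_t META = 0x40000000;

    // Stub: in the removed file this reads and decodes real keyboard input.
    static char32_t linenoiseReadChar(void) {
        return CTRL | 'b';  // pretend a Ctrl-modified 'b' arrived
    }

    int main() {
        char32_t c = linenoiseReadChar();
        if (c == 0)
            std::puts("no input available");
        else if (c == static_cast<char32_t>(-1))
            std::puts("invalid key");
        else
            std::printf("keystroke 0x%08X (ctrl=%d, meta=%d)\n",
                        static_cast<unsigned>(c),
                        (c & CTRL) != 0, (c & META) != 0);
        return 0;
    }
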
-// -static char32_t linenoiseReadChar(void) { -#ifdef _WIN32 - - INPUT_RECORD rec; - DWORD count; - int modifierKeys = 0; - bool escSeen = false; - while (true) { - ReadConsoleInputW(console_in, &rec, 1, &count); -#if 0 // helper for debugging keystrokes, display info in the debug "Output" - // window in the debugger - { - if ( rec.EventType == KEY_EVENT ) { - //if ( rec.Event.KeyEvent.uChar.UnicodeChar ) { - char buf[1024]; - sprintf( - buf, - "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X, " - "virtual scancode 0x%04X, key %s%s%s%s%s\n", - rec.Event.KeyEvent.uChar.UnicodeChar, - rec.Event.KeyEvent.wRepeatCount, - rec.Event.KeyEvent.wVirtualKeyCode, - rec.Event.KeyEvent.wVirtualScanCode, - rec.Event.KeyEvent.bKeyDown ? "down" : "up", - (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ? - " L-Ctrl" : "", - (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ? - " R-Ctrl" : "", - (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ? - " L-Alt" : "", - (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ? - " R-Alt" : "" - ); - OutputDebugStringA( buf ); - //} - } - } -#endif - if (rec.EventType != KEY_EVENT) { - continue; - } - // Windows provides for entry of characters that are not on your keyboard by - // sending the - // Unicode characters as a "key up" with virtual keycode 0x12 (VK_MENU == - // Alt key) ... - // accept these characters, otherwise only process characters on "key down" - if (!rec.Event.KeyEvent.bKeyDown && - rec.Event.KeyEvent.wVirtualKeyCode != VK_MENU) { - continue; - } - modifierKeys = 0; - // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't - // treat this - // combination as either CTRL or META we just turn off those two bits, so it - // is still - // possible to combine CTRL and/or META with an AltGr key by using - // right-Ctrl and/or - // left-Alt - if ((rec.Event.KeyEvent.dwControlKeyState & - (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) == - (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) { - rec.Event.KeyEvent.dwControlKeyState &= - ~(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED); - } - if (rec.Event.KeyEvent.dwControlKeyState & - (RIGHT_CTRL_PRESSED | LEFT_CTRL_PRESSED)) { - modifierKeys |= CTRL; - } - if (rec.Event.KeyEvent.dwControlKeyState & - (RIGHT_ALT_PRESSED | LEFT_ALT_PRESSED)) { - modifierKeys |= META; - } - if (escSeen) { - modifierKeys |= META; - } - if (rec.Event.KeyEvent.uChar.UnicodeChar == 0) { - switch (rec.Event.KeyEvent.wVirtualKeyCode) { - case VK_LEFT: - return modifierKeys | LEFT_ARROW_KEY; - case VK_RIGHT: - return modifierKeys | RIGHT_ARROW_KEY; - case VK_UP: - return modifierKeys | UP_ARROW_KEY; - case VK_DOWN: - return modifierKeys | DOWN_ARROW_KEY; - case VK_DELETE: - return modifierKeys | DELETE_KEY; - case VK_HOME: - return modifierKeys | HOME_KEY; - case VK_END: - return modifierKeys | END_KEY; - case VK_PRIOR: - return modifierKeys | PAGE_UP_KEY; - case VK_NEXT: - return modifierKeys | PAGE_DOWN_KEY; - default: - continue; // in raw mode, ReadConsoleInput shows shift, ctrl ... - } // ... 
ignore them - } else if (rec.Event.KeyEvent.uChar.UnicodeChar == - ctrlChar('[')) { // ESC, set flag for later - escSeen = true; - continue; - } else { - // we got a real character, return it - return modifierKeys | rec.Event.KeyEvent.uChar.UnicodeChar; - } - } - -#else - char32_t c; - c = readUnicodeCharacter(); - if (c == 0) return 0; - -// If _DEBUG_LINUX_KEYBOARD is set, then ctrl-^ puts us into a keyboard -// debugging mode -// where we print out decimal and decoded values for whatever the "terminal" -// program -// gives us on different keystrokes. Hit ctrl-C to exit this mode. -// -#define _DEBUG_LINUX_KEYBOARD -#if defined(_DEBUG_LINUX_KEYBOARD) - if (c == ctrlChar('^')) { // ctrl-^, special debug mode, prints all keys hit, - // ctrl-C to get out - printf( - "\nEntering keyboard debugging mode (on ctrl-^), press ctrl-C to exit " - "this mode\n"); - while (true) { - unsigned char keys[10]; - int ret = read(0, keys, 10); - - if (ret <= 0) { - printf("\nret: %d\n", ret); - } - for (int i = 0; i < ret; ++i) { - char32_t key = static_cast(keys[i]); - char* friendlyTextPtr; - char friendlyTextBuf[10]; - const char* prefixText = (key < 0x80) ? "" : "0x80+"; - char32_t keyCopy = (key < 0x80) ? key : key - 0x80; - if (keyCopy >= '!' && keyCopy <= '~') { // printable - friendlyTextBuf[0] = '\''; - friendlyTextBuf[1] = keyCopy; - friendlyTextBuf[2] = '\''; - friendlyTextBuf[3] = 0; - friendlyTextPtr = friendlyTextBuf; - } else if (keyCopy == ' ') { - friendlyTextPtr = const_cast("space"); - } else if (keyCopy == 27) { - friendlyTextPtr = const_cast("ESC"); - } else if (keyCopy == 0) { - friendlyTextPtr = const_cast("NUL"); - } else if (keyCopy == 127) { - friendlyTextPtr = const_cast("DEL"); - } else { - friendlyTextBuf[0] = '^'; - friendlyTextBuf[1] = keyCopy + 0x40; - friendlyTextBuf[2] = 0; - friendlyTextPtr = friendlyTextBuf; - } - printf("%d x%02X (%s%s) ", key, key, prefixText, friendlyTextPtr); - } - printf("\x1b[1G\n"); // go to first column of new line - - // drop out of this loop on ctrl-C - if (keys[0] == ctrlChar('C')) { - printf("Leaving keyboard debugging mode (on ctrl-C)\n"); - fflush(stdout); - return -2; - } - } - } -#endif // _DEBUG_LINUX_KEYBOARD - - EscapeSequenceProcessing::thisKeyMetaCtrl = - 0; // no modifiers yet at initialDispatch - return EscapeSequenceProcessing::doDispatch( - c, EscapeSequenceProcessing::initialDispatch); -#endif // #_WIN32 -} - -/** - * Free memory used in a recent command completion session - * - * @param lc pointer to a linenoiseCompletions struct - */ -static void freeCompletions(linenoiseCompletions* lc) { - lc->completionStrings.clear(); -} - -/** - * convert {CTRL + 'A'}, {CTRL + 'a'} and {CTRL + ctrlChar( 'A' )} into - * ctrlChar( 'A' ) - * leave META alone - * - * @param c character to clean up - * @return cleaned-up character - */ -static int cleanupCtrl(int c) { - if (c & CTRL) { - int d = c & 0x1FF; - if (d >= 'a' && d <= 'z') { - c = (c + ('a' - ctrlChar('A'))) & ~CTRL; - } - if (d >= 'A' && d <= 'Z') { - c = (c + ('A' - ctrlChar('A'))) & ~CTRL; - } - if (d >= ctrlChar('A') && d <= ctrlChar('Z')) { - c = c & ~CTRL; - } - } - return c; -} - -// break characters that may precede items to be completed -static const char breakChars[] = " =+-/\\*?\"'`&<>;|@{([])}"; - -// maximum number of completions to display without asking -static const size_t completionCountCutoff = 100; - -/** - * Handle command completion, using a completionCallback() routine to provide - * possible substitutions - * This routine handles the mechanics of updating the 
user's input buffer with - * possible replacement - * of text as the user selects a proposed completion string, or cancels the - * completion attempt. - * @param pi PromptBase struct holding information about the prompt and our - * screen position - */ -int InputBuffer::completeLine(PromptBase& pi) { - linenoiseCompletions lc; - char32_t c = 0; - - // completionCallback() expects a parsable entity, so find the previous break - // character and - // extract a copy to parse. we also handle the case where tab is hit while - // not at end-of-line. - int startIndex = pos; - while (--startIndex >= 0) { - if (strchr(breakChars, buf32[startIndex])) { - break; - } - } - ++startIndex; - int itemLength = pos - startIndex; - Utf32String unicodeCopy(&buf32[startIndex], itemLength); - Utf8String parseItem(unicodeCopy); - - // get a list of completions - completionCallback(parseItem.get(), &lc); - - // if no completions, we are done - if (lc.completionStrings.size() == 0) { - beep(); - freeCompletions(&lc); - return 0; - } - - // at least one completion - int longestCommonPrefix = 0; - int displayLength = 0; - if (lc.completionStrings.size() == 1) { - longestCommonPrefix = static_cast(lc.completionStrings[0].length()); - } else { - bool keepGoing = true; - while (keepGoing) { - for (size_t j = 0; j < lc.completionStrings.size() - 1; ++j) { - char32_t c1 = lc.completionStrings[j][longestCommonPrefix]; - char32_t c2 = lc.completionStrings[j + 1][longestCommonPrefix]; - if ((0 == c1) || (0 == c2) || (c1 != c2)) { - keepGoing = false; - break; - } - } - if (keepGoing) { - ++longestCommonPrefix; - } - } - } - if (lc.completionStrings.size() != 1) { // beep if ambiguous - beep(); - } - - // if we can extend the item, extend it and return to main loop - if (longestCommonPrefix > itemLength) { - displayLength = len + longestCommonPrefix - itemLength; - if (displayLength > buflen) { - longestCommonPrefix -= displayLength - buflen; // don't overflow buffer - displayLength = buflen; // truncate the insertion - beep(); // and make a noise - } - Utf32String displayText(displayLength + 1); - memcpy(displayText.get(), buf32, sizeof(char32_t) * startIndex); - memcpy(&displayText[startIndex], &lc.completionStrings[0][0], - sizeof(char32_t) * longestCommonPrefix); - int tailIndex = startIndex + longestCommonPrefix; - memcpy(&displayText[tailIndex], &buf32[pos], - sizeof(char32_t) * (displayLength - tailIndex + 1)); - copyString32(buf32, displayText.get(), displayLength); - pos = startIndex + longestCommonPrefix; - len = displayLength; - refreshLine(pi); - return 0; - } - - // we can't complete any further, wait for second tab - do { - c = linenoiseReadChar(); - c = cleanupCtrl(c); - } while (c == static_cast(-1)); - - // if any character other than tab, pass it to the main loop - if (c != ctrlChar('I')) { - freeCompletions(&lc); - return c; - } - - // we got a second tab, maybe show list of possible completions - bool showCompletions = true; - bool onNewLine = false; - if (lc.completionStrings.size() > completionCountCutoff) { - int savePos = - pos; // move cursor to EOL to avoid overwriting the command line - pos = len; - refreshLine(pi); - pos = savePos; - printf("\nDisplay all %u possibilities? 
(y or n)", - static_cast(lc.completionStrings.size())); - fflush(stdout); - onNewLine = true; - while (c != 'y' && c != 'Y' && c != 'n' && c != 'N' && c != ctrlChar('C')) { - do { - c = linenoiseReadChar(); - c = cleanupCtrl(c); - } while (c == static_cast(-1)); - } - switch (c) { - case 'n': - case 'N': - showCompletions = false; - freeCompletions(&lc); - break; - case ctrlChar('C'): - showCompletions = false; - freeCompletions(&lc); - if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got - c = 0; - break; - } - } - - // if showing the list, do it the way readline does it - bool stopList = false; - if (showCompletions) { - int longestCompletion = 0; - for (size_t j = 0; j < lc.completionStrings.size(); ++j) { - itemLength = static_cast(lc.completionStrings[j].length()); - if (itemLength > longestCompletion) { - longestCompletion = itemLength; - } - } - longestCompletion += 2; - int columnCount = pi.promptScreenColumns / longestCompletion; - if (columnCount < 1) { - columnCount = 1; - } - if (!onNewLine) { // skip this if we showed "Display all %d possibilities?" - int savePos = - pos; // move cursor to EOL to avoid overwriting the command line - pos = len; - refreshLine(pi); - pos = savePos; - } - size_t pauseRow = getScreenRows() - 1; - size_t rowCount = - (lc.completionStrings.size() + columnCount - 1) / columnCount; - for (size_t row = 0; row < rowCount; ++row) { - if (row == pauseRow) { - printf("\n--More--"); - fflush(stdout); - c = 0; - bool doBeep = false; - while (c != ' ' && c != '\r' && c != '\n' && c != 'y' && c != 'Y' && - c != 'n' && c != 'N' && c != 'q' && c != 'Q' && - c != ctrlChar('C')) { - if (doBeep) { - beep(); - } - doBeep = true; - do { - c = linenoiseReadChar(); - c = cleanupCtrl(c); - } while (c == static_cast(-1)); - } - switch (c) { - case ' ': - case 'y': - case 'Y': - printf("\r \r"); - pauseRow += getScreenRows() - 1; - break; - case '\r': - case '\n': - printf("\r \r"); - ++pauseRow; - break; - case 'n': - case 'N': - case 'q': - case 'Q': - printf("\r \r"); - stopList = true; - break; - case ctrlChar('C'): - if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got - stopList = true; - break; - } - } else { - printf("\n"); - } - if (stopList) { - break; - } - for (int column = 0; column < columnCount; ++column) { - size_t index = (column * rowCount) + row; - if (index < lc.completionStrings.size()) { - itemLength = static_cast(lc.completionStrings[index].length()); - fflush(stdout); - if (write32(1, lc.completionStrings[index].get(), itemLength) == -1) - return -1; - if (((column + 1) * rowCount) + row < lc.completionStrings.size()) { - for (int k = itemLength; k < longestCompletion; ++k) { - printf(" "); - } - } - } - } - } - fflush(stdout); - freeCompletions(&lc); - } - - // display the prompt on a new line, then redisplay the input buffer - if (!stopList || c == ctrlChar('C')) { - if (write(1, "\n", 1) == -1) return 0; - } - if (!pi.write()) return 0; -#ifndef _WIN32 - // we have to generate our own newline on line wrap on Linux - if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) - if (write(1, "\n", 1) == -1) return 0; -#endif - pi.promptCursorRowOffset = pi.promptExtraLines; - refreshLine(pi); - return 0; -} - -/** - * Clear the screen ONLY (no redisplay of anything) - */ -void linenoiseClearScreen(void) { -#ifdef _WIN32 - COORD coord = {0, 0}; - CONSOLE_SCREEN_BUFFER_INFO inf; - HANDLE screenHandle = GetStdHandle(STD_OUTPUT_HANDLE); - GetConsoleScreenBufferInfo(screenHandle, &inf); - SetConsoleCursorPosition(screenHandle, 
coord); - DWORD count; - FillConsoleOutputCharacterA(screenHandle, ' ', inf.dwSize.X * inf.dwSize.Y, - coord, &count); -#else - if (write(1, "\x1b[H\x1b[2J", 7) <= 0) return; -#endif -} - -void InputBuffer::clearScreen(PromptBase& pi) { - linenoiseClearScreen(); - if (!pi.write()) return; -#ifndef _WIN32 - // we have to generate our own newline on line wrap on Linux - if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) - if (write(1, "\n", 1) == -1) return; -#endif - pi.promptCursorRowOffset = pi.promptExtraLines; - refreshLine(pi); -} - -/** - * Incremental history search -- take over the prompt and keyboard as the user - * types a search - * string, deletes characters from it, changes direction, and either accepts the - * found line (for - * execution orediting) or cancels. - * @param pi PromptBase struct holding information about the (old, - * static) prompt and our - * screen position - * @param startChar the character that began the search, used to set the initial - * direction - */ -int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) { - size_t bufferSize; - size_t ucharCount = 0; - - // if not already recalling, add the current line to the history list so we - // don't have to - // special case it - if (historyIndex == historyLen - 1) { - free(history[historyLen - 1]); - bufferSize = sizeof(char32_t) * len + 1; - unique_ptr tempBuffer(new char[bufferSize]); - copyString32to8(tempBuffer.get(), bufferSize, buf32); - history[historyLen - 1] = strdup8(tempBuffer.get()); - } - int historyLineLength = len; - int historyLinePosition = pos; - char32_t emptyBuffer[1]; - char emptyWidths[1]; - InputBuffer empty(emptyBuffer, emptyWidths, 1); - empty.refreshLine(pi); // erase the old input first - DynamicPrompt dp(pi, (startChar == ctrlChar('R')) ? 
-1 : 1); - - dp.promptPreviousLen = pi.promptPreviousLen; - dp.promptPreviousInputLen = pi.promptPreviousInputLen; - dynamicRefresh(dp, buf32, historyLineLength, - historyLinePosition); // draw user's text with our prompt - - // loop until we get an exit character - int c = 0; - bool keepLooping = true; - bool useSearchedLine = true; - bool searchAgain = false; - char32_t* activeHistoryLine = 0; - while (keepLooping) { - c = linenoiseReadChar(); - c = cleanupCtrl(c); // convert CTRL + into normal ctrl - - switch (c) { - // these characters keep the selected text but do not execute it - case ctrlChar('A'): // ctrl-A, move cursor to start of line - case HOME_KEY: - case ctrlChar('B'): // ctrl-B, move cursor left by one character - case LEFT_ARROW_KEY: - case META + 'b': // meta-B, move cursor left by one word - case META + 'B': - case CTRL + LEFT_ARROW_KEY: - case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't - case ctrlChar('D'): - case META + 'd': // meta-D, kill word to right of cursor - case META + 'D': - case ctrlChar('E'): // ctrl-E, move cursor to end of line - case END_KEY: - case ctrlChar('F'): // ctrl-F, move cursor right by one character - case RIGHT_ARROW_KEY: - case META + 'f': // meta-F, move cursor right by one word - case META + 'F': - case CTRL + RIGHT_ARROW_KEY: - case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't - case META + ctrlChar('H'): - case ctrlChar('J'): - case ctrlChar('K'): // ctrl-K, kill from cursor to end of line - case ctrlChar('M'): - case ctrlChar('N'): // ctrl-N, recall next line in history - case ctrlChar('P'): // ctrl-P, recall previous line in history - case DOWN_ARROW_KEY: - case UP_ARROW_KEY: - case ctrlChar('T'): // ctrl-T, transpose characters - case ctrlChar( - 'U'): // ctrl-U, kill all characters to the left of the cursor - case ctrlChar('W'): - case META + 'y': // meta-Y, "yank-pop", rotate popped text - case META + 'Y': - case 127: - case DELETE_KEY: - case META + '<': // start of history - case PAGE_UP_KEY: - case META + '>': // end of history - case PAGE_DOWN_KEY: - keepLooping = false; - break; - - // these characters revert the input line to its previous state - case ctrlChar('C'): // ctrl-C, abort this line - case ctrlChar('G'): - case ctrlChar('L'): // ctrl-L, clear screen and redisplay line - keepLooping = false; - useSearchedLine = false; - if (c != ctrlChar('L')) { - c = -1; // ctrl-C and ctrl-G just abort the search and do nothing - // else - } - break; - - // these characters stay in search mode and update the display - case ctrlChar('S'): - case ctrlChar('R'): - if (dp.searchTextLen == - 0) { // if no current search text, recall previous text - if (previousSearchText.length()) { - dp.updateSearchText(previousSearchText.get()); - } - } - if ((dp.direction == 1 && c == ctrlChar('R')) || - (dp.direction == -1 && c == ctrlChar('S'))) { - dp.direction = 0 - dp.direction; // reverse direction - dp.updateSearchPrompt(); // change the prompt - } else { - searchAgain = true; // same direction, search again - } - break; - -// job control is its own thing -#ifndef _WIN32 - case ctrlChar('Z'): // ctrl-Z, job control - disableRawMode(); // Returning to Linux (whatever) shell, leave raw - // mode - raise(SIGSTOP); // Break out in mid-line - enableRawMode(); // Back from Linux shell, re-enter raw mode - { - bufferSize = historyLineLength + 1; - unique_ptr tempUnicode(new char32_t[bufferSize]); - copyString8to32(tempUnicode.get(), bufferSize, ucharCount, - history[historyIndex]); - dynamicRefresh(dp, 
tempUnicode.get(), historyLineLength, - historyLinePosition); - } - continue; - break; -#endif - - // these characters update the search string, and hence the selected input - // line - case ctrlChar('H'): // backspace/ctrl-H, delete char to left of cursor - if (dp.searchTextLen > 0) { - unique_ptr tempUnicode(new char32_t[dp.searchTextLen]); - --dp.searchTextLen; - dp.searchText[dp.searchTextLen] = 0; - copyString32(tempUnicode.get(), dp.searchText.get(), - dp.searchTextLen); - dp.updateSearchText(tempUnicode.get()); - } else { - beep(); - } - break; - - case ctrlChar('Y'): // ctrl-Y, yank killed text - break; - - default: - if (!isControlChar(c) && c <= 0x0010FFFF) { // not an action character - unique_ptr tempUnicode( - new char32_t[dp.searchTextLen + 2]); - copyString32(tempUnicode.get(), dp.searchText.get(), - dp.searchTextLen); - tempUnicode[dp.searchTextLen] = c; - tempUnicode[dp.searchTextLen + 1] = 0; - dp.updateSearchText(tempUnicode.get()); - } else { - beep(); - } - } // switch - - // if we are staying in search mode, search now - if (keepLooping) { - bufferSize = historyLineLength + 1; - if (activeHistoryLine) { - delete[] activeHistoryLine; - activeHistoryLine = nullptr; - } - activeHistoryLine = new char32_t[bufferSize]; - copyString8to32(activeHistoryLine, bufferSize, ucharCount, - history[historyIndex]); - if (dp.searchTextLen > 0) { - bool found = false; - int historySearchIndex = historyIndex; - int lineLength = static_cast(ucharCount); - int lineSearchPos = historyLinePosition; - if (searchAgain) { - lineSearchPos += dp.direction; - } - searchAgain = false; - while (true) { - while ((dp.direction > 0) ? (lineSearchPos < lineLength) - : (lineSearchPos >= 0)) { - if (strncmp32(dp.searchText.get(), - &activeHistoryLine[lineSearchPos], - dp.searchTextLen) == 0) { - found = true; - break; - } - lineSearchPos += dp.direction; - } - if (found) { - historyIndex = historySearchIndex; - historyLineLength = lineLength; - historyLinePosition = lineSearchPos; - break; - } else if ((dp.direction > 0) ? (historySearchIndex < historyLen - 1) - : (historySearchIndex > 0)) { - historySearchIndex += dp.direction; - bufferSize = strlen8(history[historySearchIndex]) + 1; - delete[] activeHistoryLine; - activeHistoryLine = nullptr; - activeHistoryLine = new char32_t[bufferSize]; - copyString8to32(activeHistoryLine, bufferSize, ucharCount, - history[historySearchIndex]); - lineLength = static_cast(ucharCount); - lineSearchPos = - (dp.direction > 0) ? 
0 : (lineLength - dp.searchTextLen); - } else { - beep(); - break; - } - }; // while - } - if (activeHistoryLine) { - delete[] activeHistoryLine; - activeHistoryLine = nullptr; - } - bufferSize = historyLineLength + 1; - activeHistoryLine = new char32_t[bufferSize]; - copyString8to32(activeHistoryLine, bufferSize, ucharCount, - history[historyIndex]); - dynamicRefresh(dp, activeHistoryLine, historyLineLength, - historyLinePosition); // draw user's text with our prompt - } - } // while - - // leaving history search, restore previous prompt, maybe make searched line - // current - PromptBase pb; - pb.promptChars = pi.promptIndentation; - pb.promptBytes = pi.promptBytes; - Utf32String tempUnicode(pb.promptBytes + 1); - - copyString32(tempUnicode.get(), &pi.promptText[pi.promptLastLinePosition], - pb.promptBytes - pi.promptLastLinePosition); - tempUnicode.initFromBuffer(); - pb.promptText = tempUnicode; - pb.promptExtraLines = 0; - pb.promptIndentation = pi.promptIndentation; - pb.promptLastLinePosition = 0; - pb.promptPreviousInputLen = historyLineLength; - pb.promptCursorRowOffset = dp.promptCursorRowOffset; - pb.promptScreenColumns = pi.promptScreenColumns; - pb.promptPreviousLen = dp.promptChars; - if (useSearchedLine && activeHistoryLine) { - historyRecallMostRecent = true; - copyString32(buf32, activeHistoryLine, buflen + 1); - len = historyLineLength; - pos = historyLinePosition; - } - if (activeHistoryLine) { - delete[] activeHistoryLine; - activeHistoryLine = nullptr; - } - dynamicRefresh(pb, buf32, len, - pos); // redraw the original prompt with current input - pi.promptPreviousInputLen = len; - pi.promptCursorRowOffset = pi.promptExtraLines + pb.promptCursorRowOffset; - previousSearchText = - dp.searchText; // save search text for possible reuse on ctrl-R ctrl-R - return c; // pass a character or -1 back to main loop -} - -static bool isCharacterAlphanumeric(char32_t testChar) { -#ifdef _WIN32 - return (iswalnum((wint_t)testChar) != 0 ? true : false); -#else - return (iswalnum(testChar) != 0 ? 
true : false); -#endif -} - -#ifndef _WIN32 -static bool gotResize = false; -#endif -static int keyType = 0; - -int InputBuffer::getInputLine(PromptBase& pi) { - keyType = 0; - - // The latest history entry is always our current buffer - if (len > 0) { - size_t bufferSize = sizeof(char32_t) * len + 1; - unique_ptr tempBuffer(new char[bufferSize]); - copyString32to8(tempBuffer.get(), bufferSize, buf32); - linenoiseHistoryAdd(tempBuffer.get()); - } else { - linenoiseHistoryAdd(""); - } - historyIndex = historyLen - 1; - historyRecallMostRecent = false; - - // display the prompt - if (!pi.write()) return -1; - -#ifndef _WIN32 - // we have to generate our own newline on line wrap on Linux - if (pi.promptIndentation == 0 && pi.promptExtraLines > 0) - if (write(1, "\n", 1) == -1) return -1; -#endif - - // the cursor starts out at the end of the prompt - pi.promptCursorRowOffset = pi.promptExtraLines; - - // kill and yank start in "other" mode - killRing.lastAction = KillRing::actionOther; - - // when history search returns control to us, we execute its terminating - // keystroke - int terminatingKeystroke = -1; - - // if there is already text in the buffer, display it first - if (len > 0) { - refreshLine(pi); - } - - // loop collecting characters, respond to line editing characters - while (true) { - int c; - if (terminatingKeystroke == -1) { - c = linenoiseReadChar(); // get a new keystroke - - keyType = 0; - if (c != 0) { - // set flag that we got some input - if (c == ctrlChar('C')) { - keyType = 1; - } else if (c == ctrlChar('D')) { - keyType = 2; - } - } - -#ifndef _WIN32 - if (c == 0 && gotResize) { - // caught a window resize event - // now redraw the prompt and line - gotResize = false; - pi.promptScreenColumns = getScreenColumns(); - dynamicRefresh(pi, buf32, len, - pos); // redraw the original prompt with current input - continue; - } -#endif - } else { - c = terminatingKeystroke; // use the terminating keystroke from search - terminatingKeystroke = -1; // clear it once we've used it - } - - c = cleanupCtrl(c); // convert CTRL + into normal ctrl - - if (c == 0) { - return len; - } - - if (c == -1) { - refreshLine(pi); - continue; - } - - if (c == -2) { - if (!pi.write()) return -1; - refreshLine(pi); - continue; - } - - // ctrl-I/tab, command completion, needs to be before switch statement - if (c == ctrlChar('I') && completionCallback) { - killRing.lastAction = KillRing::actionOther; - historyRecallMostRecent = false; - - // completeLine does the actual completion and replacement - c = completeLine(pi); - - if (c < 0) // return on error - return len; - - if (c == 0) // read next character when 0 - continue; - - // deliberate fall-through here, so we use the terminating character - } - - switch (c) { - case ctrlChar('A'): // ctrl-A, move cursor to start of line - case HOME_KEY: - killRing.lastAction = KillRing::actionOther; - pos = 0; - refreshLine(pi); - break; - - case ctrlChar('B'): // ctrl-B, move cursor left by one character - case LEFT_ARROW_KEY: - killRing.lastAction = KillRing::actionOther; - if (pos > 0) { - --pos; - refreshLine(pi); - } - break; - - case META + 'b': // meta-B, move cursor left by one word - case META + 'B': - case CTRL + LEFT_ARROW_KEY: - case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't - killRing.lastAction = KillRing::actionOther; - if (pos > 0) { - while (pos > 0 && !isCharacterAlphanumeric(buf32[pos - 1])) { - --pos; - } - while (pos > 0 && isCharacterAlphanumeric(buf32[pos - 1])) { - --pos; - } - refreshLine(pi); - } - break; - - 
case ctrlChar('C'): // ctrl-C, abort this line - killRing.lastAction = KillRing::actionOther; - historyRecallMostRecent = false; - errno = EAGAIN; - --historyLen; - free(history[historyLen]); - // we need one last refresh with the cursor at the end of the line - // so we don't display the next prompt over the previous input line - pos = len; // pass len as pos for EOL - refreshLine(pi); - if (write(1, "^C", 2) == -1) return -1; // Display the ^C we got - return -1; - - case META + 'c': // meta-C, give word initial Cap - case META + 'C': - killRing.lastAction = KillRing::actionOther; - historyRecallMostRecent = false; - if (pos < len) { - while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { - ++pos; - } - if (pos < len && isCharacterAlphanumeric(buf32[pos])) { - if (buf32[pos] >= 'a' && buf32[pos] <= 'z') { - buf32[pos] += 'A' - 'a'; - } - ++pos; - } - while (pos < len && isCharacterAlphanumeric(buf32[pos])) { - if (buf32[pos] >= 'A' && buf32[pos] <= 'Z') { - buf32[pos] += 'a' - 'A'; - } - ++pos; - } - refreshLine(pi); - } - break; - - // ctrl-D, delete the character under the cursor - // on an empty line, exit the shell - case ctrlChar('D'): - killRing.lastAction = KillRing::actionOther; - if (len > 0 && pos < len) { - historyRecallMostRecent = false; - memmove(buf32 + pos, buf32 + pos + 1, sizeof(char32_t) * (len - pos)); - --len; - refreshLine(pi); - } else if (len == 0) { - --historyLen; - free(history[historyLen]); - return -1; - } - break; - - case META + 'd': // meta-D, kill word to right of cursor - case META + 'D': - if (pos < len) { - historyRecallMostRecent = false; - int endingPos = pos; - while (endingPos < len && - !isCharacterAlphanumeric(buf32[endingPos])) { - ++endingPos; - } - while (endingPos < len && isCharacterAlphanumeric(buf32[endingPos])) { - ++endingPos; - } - killRing.kill(&buf32[pos], endingPos - pos, true); - memmove(buf32 + pos, buf32 + endingPos, - sizeof(char32_t) * (len - endingPos + 1)); - len -= endingPos - pos; - refreshLine(pi); - } - killRing.lastAction = KillRing::actionKill; - break; - - case ctrlChar('E'): // ctrl-E, move cursor to end of line - case END_KEY: - killRing.lastAction = KillRing::actionOther; - pos = len; - refreshLine(pi); - break; - - case ctrlChar('F'): // ctrl-F, move cursor right by one character - case RIGHT_ARROW_KEY: - killRing.lastAction = KillRing::actionOther; - if (pos < len) { - ++pos; - refreshLine(pi); - } - break; - - case META + 'f': // meta-F, move cursor right by one word - case META + 'F': - case CTRL + RIGHT_ARROW_KEY: - case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't - killRing.lastAction = KillRing::actionOther; - if (pos < len) { - while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { - ++pos; - } - while (pos < len && isCharacterAlphanumeric(buf32[pos])) { - ++pos; - } - refreshLine(pi); - } - break; - - case ctrlChar('H'): // backspace/ctrl-H, delete char to left of cursor - killRing.lastAction = KillRing::actionOther; - if (pos > 0) { - historyRecallMostRecent = false; - memmove(buf32 + pos - 1, buf32 + pos, - sizeof(char32_t) * (1 + len - pos)); - --pos; - --len; - refreshLine(pi); - } - break; - - // meta-Backspace, kill word to left of cursor - case META + ctrlChar('H'): - if (pos > 0) { - historyRecallMostRecent = false; - int startingPos = pos; - while (pos > 0 && !isCharacterAlphanumeric(buf32[pos - 1])) { - --pos; - } - while (pos > 0 && isCharacterAlphanumeric(buf32[pos - 1])) { - --pos; - } - killRing.kill(&buf32[pos], startingPos - pos, false); - memmove(buf32 + 
pos, buf32 + startingPos, - sizeof(char32_t) * (len - startingPos + 1)); - len -= startingPos - pos; - refreshLine(pi); - } - killRing.lastAction = KillRing::actionKill; - break; - - case ctrlChar('J'): // ctrl-J/linefeed/newline, accept line - case ctrlChar('M'): // ctrl-M/return/enter - killRing.lastAction = KillRing::actionOther; - // we need one last refresh with the cursor at the end of the line - // so we don't display the next prompt over the previous input line - pos = len; // pass len as pos for EOL - refreshLine(pi); - historyPreviousIndex = historyRecallMostRecent ? historyIndex : -2; - --historyLen; - free(history[historyLen]); - return len; - - case ctrlChar('K'): // ctrl-K, kill from cursor to end of line - killRing.kill(&buf32[pos], len - pos, true); - buf32[pos] = '\0'; - len = pos; - refreshLine(pi); - killRing.lastAction = KillRing::actionKill; - historyRecallMostRecent = false; - break; - - case ctrlChar('L'): // ctrl-L, clear screen and redisplay line - clearScreen(pi); - break; - - case META + 'l': // meta-L, lowercase word - case META + 'L': - killRing.lastAction = KillRing::actionOther; - if (pos < len) { - historyRecallMostRecent = false; - while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { - ++pos; - } - while (pos < len && isCharacterAlphanumeric(buf32[pos])) { - if (buf32[pos] >= 'A' && buf32[pos] <= 'Z') { - buf32[pos] += 'a' - 'A'; - } - ++pos; - } - refreshLine(pi); - } - break; - - case ctrlChar('N'): // ctrl-N, recall next line in history - case ctrlChar('P'): // ctrl-P, recall previous line in history - case DOWN_ARROW_KEY: - case UP_ARROW_KEY: - killRing.lastAction = KillRing::actionOther; - // if not already recalling, add the current line to the history list so - // we don't - // have to special case it - if (historyIndex == historyLen - 1) { - free(history[historyLen - 1]); - size_t tempBufferSize = sizeof(char32_t) * len + 1; - unique_ptr tempBuffer(new char[tempBufferSize]); - copyString32to8(tempBuffer.get(), tempBufferSize, buf32); - history[historyLen - 1] = strdup8(tempBuffer.get()); - } - if (historyLen > 1) { - if (c == UP_ARROW_KEY) { - c = ctrlChar('P'); - } - if (historyPreviousIndex != -2 && c != ctrlChar('P')) { - historyIndex = - 1 + historyPreviousIndex; // emulate Windows down-arrow - } else { - historyIndex += (c == ctrlChar('P')) ? -1 : 1; - } - historyPreviousIndex = -2; - if (historyIndex < 0) { - historyIndex = 0; - break; - } else if (historyIndex >= historyLen) { - historyIndex = historyLen - 1; - break; - } - historyRecallMostRecent = true; - size_t ucharCount = 0; - copyString8to32(buf32, buflen, ucharCount, history[historyIndex]); - len = pos = static_cast(ucharCount); - refreshLine(pi); - } - break; - - case ctrlChar('R'): // ctrl-R, reverse history search - case ctrlChar('S'): // ctrl-S, forward history search - terminatingKeystroke = incrementalHistorySearch(pi, c); - break; - - case ctrlChar('T'): // ctrl-T, transpose characters - killRing.lastAction = KillRing::actionOther; - if (pos > 0 && len > 1) { - historyRecallMostRecent = false; - size_t leftCharPos = (pos == len) ? 
pos - 2 : pos - 1; - char32_t aux = buf32[leftCharPos]; - buf32[leftCharPos] = buf32[leftCharPos + 1]; - buf32[leftCharPos + 1] = aux; - if (pos != len) ++pos; - refreshLine(pi); - } - break; - - case ctrlChar( - 'U'): // ctrl-U, kill all characters to the left of the cursor - if (pos > 0) { - historyRecallMostRecent = false; - killRing.kill(&buf32[0], pos, false); - len -= pos; - memmove(buf32, buf32 + pos, sizeof(char32_t) * (len + 1)); - pos = 0; - refreshLine(pi); - } - killRing.lastAction = KillRing::actionKill; - break; - - case META + 'u': // meta-U, uppercase word - case META + 'U': - killRing.lastAction = KillRing::actionOther; - if (pos < len) { - historyRecallMostRecent = false; - while (pos < len && !isCharacterAlphanumeric(buf32[pos])) { - ++pos; - } - while (pos < len && isCharacterAlphanumeric(buf32[pos])) { - if (buf32[pos] >= 'a' && buf32[pos] <= 'z') { - buf32[pos] += 'A' - 'a'; - } - ++pos; - } - refreshLine(pi); - } - break; - - // ctrl-W, kill to whitespace (not word) to left of cursor - case ctrlChar('W'): - if (pos > 0) { - historyRecallMostRecent = false; - int startingPos = pos; - while (pos > 0 && buf32[pos - 1] == ' ') { - --pos; - } - while (pos > 0 && buf32[pos - 1] != ' ') { - --pos; - } - killRing.kill(&buf32[pos], startingPos - pos, false); - memmove(buf32 + pos, buf32 + startingPos, - sizeof(char32_t) * (len - startingPos + 1)); - len -= startingPos - pos; - refreshLine(pi); - } - killRing.lastAction = KillRing::actionKill; - break; - - case ctrlChar('Y'): // ctrl-Y, yank killed text - historyRecallMostRecent = false; - { - Utf32String* restoredText = killRing.yank(); - if (restoredText) { - bool truncated = false; - size_t ucharCount = restoredText->length(); - if (ucharCount > static_cast(buflen - len)) { - ucharCount = buflen - len; - truncated = true; - } - memmove(buf32 + pos + ucharCount, buf32 + pos, - sizeof(char32_t) * (len - pos + 1)); - memmove(buf32 + pos, restoredText->get(), - sizeof(char32_t) * ucharCount); - pos += static_cast(ucharCount); - len += static_cast(ucharCount); - refreshLine(pi); - killRing.lastAction = KillRing::actionYank; - killRing.lastYankSize = ucharCount; - if (truncated) { - beep(); - } - } else { - beep(); - } - } - break; - - case META + 'y': // meta-Y, "yank-pop", rotate popped text - case META + 'Y': - if (killRing.lastAction == KillRing::actionYank) { - historyRecallMostRecent = false; - Utf32String* restoredText = killRing.yankPop(); - if (restoredText) { - bool truncated = false; - size_t ucharCount = restoredText->length(); - if (ucharCount > - static_cast(killRing.lastYankSize + buflen - len)) { - ucharCount = killRing.lastYankSize + buflen - len; - truncated = true; - } - if (ucharCount > killRing.lastYankSize) { - memmove(buf32 + pos + ucharCount - killRing.lastYankSize, - buf32 + pos, sizeof(char32_t) * (len - pos + 1)); - memmove(buf32 + pos - killRing.lastYankSize, restoredText->get(), - sizeof(char32_t) * ucharCount); - } else { - memmove(buf32 + pos - killRing.lastYankSize, restoredText->get(), - sizeof(char32_t) * ucharCount); - memmove(buf32 + pos + ucharCount - killRing.lastYankSize, - buf32 + pos, sizeof(char32_t) * (len - pos + 1)); - } - pos += static_cast(ucharCount - killRing.lastYankSize); - len += static_cast(ucharCount - killRing.lastYankSize); - killRing.lastYankSize = ucharCount; - refreshLine(pi); - if (truncated) { - beep(); - } - break; - } - } - beep(); - break; - -#ifndef _WIN32 - case ctrlChar('Z'): // ctrl-Z, job control - disableRawMode(); // Returning to Linux (whatever) shell, leave 
raw - // mode - raise(SIGSTOP); // Break out in mid-line - enableRawMode(); // Back from Linux shell, re-enter raw mode - if (!pi.write()) break; // Redraw prompt - refreshLine(pi); // Refresh the line - break; -#endif - - // DEL, delete the character under the cursor - case 127: - case DELETE_KEY: - killRing.lastAction = KillRing::actionOther; - if (len > 0 && pos < len) { - historyRecallMostRecent = false; - memmove(buf32 + pos, buf32 + pos + 1, sizeof(char32_t) * (len - pos)); - --len; - refreshLine(pi); - } - break; - - case META + '<': // meta-<, beginning of history - case PAGE_UP_KEY: // Page Up, beginning of history - case META + '>': // meta->, end of history - case PAGE_DOWN_KEY: // Page Down, end of history - killRing.lastAction = KillRing::actionOther; - // if not already recalling, add the current line to the history list so - // we don't - // have to special case it - if (historyIndex == historyLen - 1) { - free(history[historyLen - 1]); - size_t tempBufferSize = sizeof(char32_t) * len + 1; - unique_ptr tempBuffer(new char[tempBufferSize]); - copyString32to8(tempBuffer.get(), tempBufferSize, buf32); - history[historyLen - 1] = strdup8(tempBuffer.get()); - } - if (historyLen > 1) { - historyIndex = - (c == META + '<' || c == PAGE_UP_KEY) ? 0 : historyLen - 1; - historyPreviousIndex = -2; - historyRecallMostRecent = true; - size_t ucharCount = 0; - copyString8to32(buf32, buflen, ucharCount, history[historyIndex]); - len = pos = static_cast(ucharCount); - refreshLine(pi); - } - break; - - // not one of our special characters, maybe insert it in the buffer - default: - killRing.lastAction = KillRing::actionOther; - historyRecallMostRecent = false; - if (c & (META | CTRL)) { // beep on unknown Ctrl and/or Meta keys - beep(); - break; - } - if (len < buflen) { - if (isControlChar(c)) { // don't insert control characters - beep(); - break; - } - if (len == pos) { // at end of buffer - buf32[pos] = c; - ++pos; - ++len; - buf32[len] = '\0'; - int inputLen = calculateColumnPosition(buf32, len); - if (pi.promptIndentation + inputLen < pi.promptScreenColumns) { - if (inputLen > pi.promptPreviousInputLen) - pi.promptPreviousInputLen = inputLen; - /* Avoid a full update of the line in the - * trivial case. 
*/ - if (write32(1, reinterpret_cast(&c), 1) == -1) - return -1; - } else { - refreshLine(pi); - } - } else { // not at end of buffer, have to move characters to our - // right - memmove(buf32 + pos + 1, buf32 + pos, - sizeof(char32_t) * (len - pos)); - buf32[pos] = c; - ++len; - ++pos; - buf32[len] = '\0'; - refreshLine(pi); - } - } else { - beep(); // buffer is full, beep on new characters - } - break; - } - } - return len; -} - -static string preloadedBufferContents; // used with linenoisePreloadBuffer -static string preloadErrorMessage; - -/** - * linenoisePreloadBuffer provides text to be inserted into the command buffer - * - * the provided text will be processed to be usable and will be used to preload - * the input buffer on the next call to linenoise() - * - * @param preloadText text to begin with on the next call to linenoise() - */ -void linenoisePreloadBuffer(const char* preloadText) { - if (!preloadText) { - return; - } - int bufferSize = static_cast(strlen(preloadText) + 1); - unique_ptr tempBuffer(new char[bufferSize]); - strncpy(&tempBuffer[0], preloadText, bufferSize); - - // remove characters that won't display correctly - char* pIn = &tempBuffer[0]; - char* pOut = pIn; - bool controlsStripped = false; - bool whitespaceSeen = false; - while (*pIn) { - unsigned char c = - *pIn++; // we need unsigned so chars 0x80 and above are allowed - if ('\r' == c) { // silently skip CR - continue; - } - if ('\n' == c || '\t' == c) { // note newline or tab - whitespaceSeen = true; - continue; - } - if (isControlChar( - c)) { // remove other control characters, flag for message - controlsStripped = true; - *pOut++ = ' '; - continue; - } - if (whitespaceSeen) { // convert whitespace to a single space - *pOut++ = ' '; - whitespaceSeen = false; - } - *pOut++ = c; - } - *pOut = 0; - int processedLength = static_cast(pOut - tempBuffer.get()); - bool lineTruncated = false; - if (processedLength > (LINENOISE_MAX_LINE - 1)) { - lineTruncated = true; - tempBuffer[LINENOISE_MAX_LINE - 1] = 0; - } - preloadedBufferContents = tempBuffer.get(); - if (controlsStripped) { - preloadErrorMessage += - " [Edited line: control characters were converted to spaces]\n"; - } - if (lineTruncated) { - preloadErrorMessage += " [Edited line: the line length was reduced from "; - char buf[128]; - snprintf(buf, sizeof(buf), "%d to %d]\n", processedLength, - (LINENOISE_MAX_LINE - 1)); - preloadErrorMessage += buf; - } -} - -/** - * linenoise is a readline replacement. 
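
Taken together, the routines above define how text reaches the editor before the user types: linenoisePreloadBuffer() stores a sanitized copy (CR silently dropped, tabs and newlines collapsed to single spaces, other control characters replaced and reported), and the next linenoise() call consumes it. A minimal caller sketch of that removed API, using only functions declared in the deleted linenoise.h below; the prompt and preload text are purely illustrative:

    #include "linenoise.h"
    #include <cstdio>
    #include <cstdlib>

    int main() {
        // Queue initial buffer contents for the next prompt; the embedded tab and
        // trailing newline are normalized away by the sanitizer above.
        linenoisePreloadBuffer("echo hello\tworld\n");
        char* line = linenoise("repl> ");   // heap-allocated result; caller must free
        if (line) {
            printf("read: %s\n", line);
            free(line);
        }
        return 0;
    }
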
- * - * call it with a prompt to display and it will return a line of input from the - * user - * - * @param prompt text of prompt to display to the user - * @return the returned string belongs to the caller on return and must be - * freed to prevent - * memory leaks - */ -char* linenoise(const char* prompt) { -#ifndef _WIN32 - gotResize = false; -#endif - if (isatty(STDIN_FILENO)) { // input is from a terminal - char32_t buf32[LINENOISE_MAX_LINE]; - char charWidths[LINENOISE_MAX_LINE]; - if (!preloadErrorMessage.empty()) { - printf("%s", preloadErrorMessage.c_str()); - fflush(stdout); - preloadErrorMessage.clear(); - } - PromptInfo pi(prompt, getScreenColumns()); - if (isUnsupportedTerm()) { - if (!pi.write()) return 0; - fflush(stdout); - if (preloadedBufferContents.empty()) { - unique_ptr buf8(new char[LINENOISE_MAX_LINE]); - if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { - return NULL; - } - size_t len = strlen(buf8.get()); - while (len && (buf8[len - 1] == '\n' || buf8[len - 1] == '\r')) { - --len; - buf8[len] = '\0'; - } - return strdup(buf8.get()); // caller must free buffer - } else { - char* buf8 = strdup(preloadedBufferContents.c_str()); - preloadedBufferContents.clear(); - return buf8; // caller must free buffer - } - } else { - if (enableRawMode() == -1) { - return NULL; - } - InputBuffer ib(buf32, charWidths, LINENOISE_MAX_LINE); - if (!preloadedBufferContents.empty()) { - ib.preloadBuffer(preloadedBufferContents.c_str()); - preloadedBufferContents.clear(); - } - int count = ib.getInputLine(pi); - disableRawMode(); - printf("\n"); - if (count == -1) { - return NULL; - } - size_t bufferSize = sizeof(char32_t) * ib.length() + 1; - unique_ptr buf8(new char[bufferSize]); - copyString32to8(buf8.get(), bufferSize, buf32); - return strdup(buf8.get()); // caller must free buffer - } - } else { // input not from a terminal, we should work with piped input, i.e. - // redirected stdin - unique_ptr buf8(new char[LINENOISE_MAX_LINE]); - if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { - return NULL; - } - - // if fgets() gave us the newline, remove it - int count = static_cast(strlen(buf8.get())); - if (count > 0 && buf8[count - 1] == '\n') { - --count; - buf8[count] = '\0'; - } - return strdup(buf8.get()); // caller must free buffer - } -} - -/* Register a callback function to be called for tab-completion. 
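
The completion entry points that follow are the caller-facing half of the completeLine() machinery earlier in this file: the callback receives the word under the cursor (everything after the previous break character) and reports candidates through linenoiseAddCompletion(). A minimal sketch of wiring it up, assuming nothing beyond the declarations in the deleted linenoise.h; the candidate words are invented for illustration:

    #include "linenoise.h"
    #include <cstring>

    // Invoked by completeLine() on Tab with the partial word to complete.
    static void myCompletion(const char* prefix, linenoiseCompletions* lc) {
        static const char* words[] = { "break", "build", "builtins" };  // illustrative
        for (const char* w : words) {
            if (strncmp(w, prefix, strlen(prefix)) == 0)
                linenoiseAddCompletion(lc, w);   // each call adds one candidate
        }
    }

    // during start-up:
    //     linenoiseSetCompletionCallback(myCompletion);
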
*/ -void linenoiseSetCompletionCallback(linenoiseCompletionCallback* fn) { - completionCallback = fn; -} - -void linenoiseAddCompletion(linenoiseCompletions* lc, const char* str) { - lc->completionStrings.push_back(Utf32String(str)); -} - -int linenoiseHistoryAdd(const char* line) { - if (historyMaxLen == 0) { - return 0; - } - if (history == NULL) { - history = - reinterpret_cast(malloc(sizeof(char8_t*) * historyMaxLen)); - if (history == NULL) { - return 0; - } - memset(history, 0, (sizeof(char*) * historyMaxLen)); - } - char8_t* linecopy = strdup8(line); - if (!linecopy) { - return 0; - } - - // convert newlines in multi-line code to spaces before storing - char8_t* p = linecopy; - while (*p) { - if (*p == '\n') { - *p = ' '; - } - ++p; - } - - // prevent duplicate history entries - if (historyLen > 0 && history[historyLen - 1] != nullptr && - strcmp(reinterpret_cast(history[historyLen - 1]), - reinterpret_cast(linecopy)) == 0) { - free(linecopy); - return 0; - } - - if (historyLen == historyMaxLen) { - free(history[0]); - memmove(history, history + 1, sizeof(char*) * (historyMaxLen - 1)); - --historyLen; - if (--historyPreviousIndex < -1) { - historyPreviousIndex = -2; - } - } - - history[historyLen] = linecopy; - ++historyLen; - return 1; -} - -int linenoiseHistorySetMaxLen(int len) { - if (len < 1) { - return 0; - } - if (history) { - int tocopy = historyLen; - char8_t** newHistory = - reinterpret_cast(malloc(sizeof(char8_t*) * len)); - if (newHistory == NULL) { - return 0; - } - if (len < tocopy) { - tocopy = len; - } - memcpy(newHistory, history + historyMaxLen - tocopy, - sizeof(char8_t*) * tocopy); - free(history); - history = newHistory; - } - historyMaxLen = len; - if (historyLen > historyMaxLen) { - historyLen = historyMaxLen; - } - return 1; -} - -/* Fetch a line of the history by (zero-based) index. If the requested - * line does not exist, NULL is returned. The return value is a heap-allocated - * copy of the line, and the caller is responsible for de-allocating it. */ -char* linenoiseHistoryLine(int index) { - if (index < 0 || index >= historyLen) return NULL; - - return strdup(reinterpret_cast(history[index])); -} - -/* Save the history in the specified file. On success 0 is returned - * otherwise -1 is returned. */ -int linenoiseHistorySave(const char* filename) { -#if _WIN32 - FILE* fp = fopen(filename, "wt"); -#else - int fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR); - - if (fd < 0) { - return -1; - } - - FILE* fp = fdopen(fd, "wt"); -#endif - - if (fp == NULL) { - return -1; - } - - for (int j = 0; j < historyLen; ++j) { - if (history[j][0] != '\0') { - fprintf(fp, "%s\n", history[j]); - } - } - - fclose(fp); - - return 0; -} - -/* Load the history from the specified file. If the file does not exist - * zero is returned and no operation is performed. - * - * If the file exists and the operation succeeded 0 is returned, otherwise - * on error -1 is returned. */ -int linenoiseHistoryLoad(const char* filename) { - FILE* fp = fopen(filename, "rt"); - if (fp == NULL) { - return -1; - } - - char buf[LINENOISE_MAX_LINE]; - while (fgets(buf, LINENOISE_MAX_LINE, fp) != NULL) { - char* p = strchr(buf, '\r'); - if (!p) { - p = strchr(buf, '\n'); - } - if (p) { - *p = '\0'; - } - if (p != buf) { - linenoiseHistoryAdd(buf); - } - } - fclose(fp); - return 0; -} - -/* Set if to use or not the multi line mode. 
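
The history functions above behave as their deleted implementations show: duplicates of the most recent entry are ignored, embedded newlines are stored as spaces, the buffer is a bounded ring that drops its oldest entry when full, and save/load write one entry per line. A minimal usage sketch; the file name and size limit are illustrative:

    #include "linenoise.h"

    void rememberAndPersist(const char* line) {
        linenoiseHistorySetMaxLen(1000);            // ring size; oldest entries fall off
        linenoiseHistoryAdd(line);                  // no-op if it repeats the latest entry
        linenoiseHistorySave(".example_history");   // one entry per line, returns 0 on success
    }

    void restoreAtStartup() {
        linenoiseHistoryLoad(".example_history");   // returns -1 if the file cannot be opened
    }
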
*/ -/* note that this is a stub only, as linenoise-ng always multi-line */ -void linenoiseSetMultiLine(int) {} - -/* This special mode is used by linenoise in order to print scan codes - * on screen for debugging / development purposes. It is implemented - * by the linenoise_example program using the --keycodes option. */ -void linenoisePrintKeyCodes(void) { - char quit[4]; - - printf( - "Linenoise key codes debugging mode.\n" - "Press keys to see scan codes. Type 'quit' at any time to exit.\n"); - if (enableRawMode() == -1) return; - memset(quit, ' ', 4); - while (1) { - char c; - int nread; - -#if _WIN32 - nread = _read(STDIN_FILENO, &c, 1); -#else - nread = read(STDIN_FILENO, &c, 1); -#endif - if (nread <= 0) continue; - memmove(quit, quit + 1, sizeof(quit) - 1); /* shift string to left. */ - quit[sizeof(quit) - 1] = c; /* Insert current char on the right. */ - if (memcmp(quit, "quit", sizeof(quit)) == 0) break; - - printf("'%c' %02x (%d) (type quit to exit)\n", isprint(c) ? c : '?', (int)c, - (int)c); - printf("\r"); /* Go left edge manually, we are in raw mode. */ - fflush(stdout); - } - disableRawMode(); -} - -#ifndef _WIN32 -static void WindowSizeChanged(int) { - // do nothing here but setting this flag - gotResize = true; -} -#endif - -int linenoiseInstallWindowChangeHandler(void) { -#ifndef _WIN32 - struct sigaction sa; - sigemptyset(&sa.sa_mask); - sa.sa_flags = 0; - sa.sa_handler = &WindowSizeChanged; - - if (sigaction(SIGWINCH, &sa, nullptr) == -1) { - return errno; - } -#endif - return 0; -} - -int linenoiseKeyType(void) { - return keyType; -} diff --git a/src/linenoise/linenoise.h b/src/linenoise/linenoise.h deleted file mode 100644 index 3a8eb9f7ee6..00000000000 --- a/src/linenoise/linenoise.h +++ /dev/null @@ -1,73 +0,0 @@ -/* linenoise.h -- guerrilla line editing library against the idea that a - * line editing lib needs to be 20,000 lines of C code. - * - * See linenoise.c for more information. - * - * Copyright (c) 2010, Salvatore Sanfilippo - * Copyright (c) 2010, Pieter Noordhuis - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __LINENOISE_H -#define __LINENOISE_H - -#define LINENOISE_VERSION "1.0.0" -#define LINENOISE_VERSION_MAJOR 1 -#define LINENOISE_VERSION_MINOR 1 - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct linenoiseCompletions linenoiseCompletions; - -typedef void(linenoiseCompletionCallback)(const char*, linenoiseCompletions*); -void linenoiseSetCompletionCallback(linenoiseCompletionCallback* fn); -void linenoiseAddCompletion(linenoiseCompletions* lc, const char* str); - -char* linenoise(const char* prompt); -void linenoisePreloadBuffer(const char* preloadText); -int linenoiseHistoryAdd(const char* line); -int linenoiseHistorySetMaxLen(int len); -char* linenoiseHistoryLine(int index); -int linenoiseHistorySave(const char* filename); -int linenoiseHistoryLoad(const char* filename); -void linenoiseHistoryFree(void); -void linenoiseClearScreen(void); -void linenoiseSetMultiLine(int ml); -void linenoisePrintKeyCodes(void); -/* the following are extensions to the original linenoise API */ -int linenoiseInstallWindowChangeHandler(void); -/* returns type of key pressed: 1 = CTRL-C, 2 = CTRL-D, 0 = other */ -int linenoiseKeyType(void); - -#ifdef __cplusplus -} -#endif - -#endif /* __LINENOISE_H */ diff --git a/src/linenoise/wcwidth.cpp b/src/linenoise/wcwidth.cpp deleted file mode 100644 index deec0ba6b57..00000000000 --- a/src/linenoise/wcwidth.cpp +++ /dev/null @@ -1,315 +0,0 @@ -/* - * This is an implementation of wcwidth() and wcswidth() (defined in - * IEEE Std 1002.1-2001) for Unicode. - * - * http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html - * http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html - * - * In fixed-width output devices, Latin characters all occupy a single - * "cell" position of equal width, whereas ideographic CJK characters - * occupy two such cells. Interoperability between terminal-line - * applications and (teletype-style) character terminals using the - * UTF-8 encoding requires agreement on which character should advance - * the cursor by how many cell positions. No established formal - * standards exist at present on which Unicode character shall occupy - * how many cell positions on character terminals. These routines are - * a first attempt of defining such behavior based on simple rules - * applied to data provided by the Unicode Consortium. - * - * For some graphical characters, the Unicode standard explicitly - * defines a character-cell width via the definition of the East Asian - * FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes. - * In all these cases, there is no ambiguity about which width a - * terminal shall use. For characters in the East Asian Ambiguous (A) - * class, the width choice depends purely on a preference of backward - * compatibility with either historic CJK or Western practice. 
- * Choosing single-width for these characters is easy to justify as - * the appropriate long-term solution, as the CJK practice of - * displaying these characters as double-width comes from historic - * implementation simplicity (8-bit encoded characters were displayed - * single-width and 16-bit ones double-width, even for Greek, - * Cyrillic, etc.) and not any typographic considerations. - * - * Much less clear is the choice of width for the Not East Asian - * (Neutral) class. Existing practice does not dictate a width for any - * of these characters. It would nevertheless make sense - * typographically to allocate two character cells to characters such - * as for instance EM SPACE or VOLUME INTEGRAL, which cannot be - * represented adequately with a single-width glyph. The following - * routines at present merely assign a single-cell width to all - * neutral characters, in the interest of simplicity. This is not - * entirely satisfactory and should be reconsidered before - * establishing a formal standard in this area. At the moment, the - * decision which Not East Asian (Neutral) characters should be - * represented by double-width glyphs cannot yet be answered by - * applying a simple rule from the Unicode database content. Setting - * up a proper standard for the behavior of UTF-8 character terminals - * will require a careful analysis not only of each Unicode character, - * but also of each presentation form, something the author of these - * routines has avoided to do so far. - * - * http://www.unicode.org/unicode/reports/tr11/ - * - * Markus Kuhn -- 2007-05-26 (Unicode 5.0) - * - * Permission to use, copy, modify, and distribute this software - * for any purpose and without fee is hereby granted. The author - * disclaims all warranties with regard to this software. - * - * Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c - */ - -#include -#include -#include - -namespace linenoise_ng { - -struct interval { - char32_t first; - char32_t last; -}; - -/* auxiliary function for binary search in interval table */ -static int bisearch(char32_t ucs, const struct interval *table, int max) { - int min = 0; - int mid; - - if (ucs < table[0].first || ucs > table[max].last) - return 0; - while (max >= min) { - mid = (min + max) / 2; - if (ucs > table[mid].last) - min = mid + 1; - else if (ucs < table[mid].first) - max = mid - 1; - else - return 1; - } - - return 0; -} - - -/* The following two functions define the column width of an ISO 10646 - * character as follows: - * - * - The null character (U+0000) has a column width of 0. - * - * - Other C0/C1 control characters and DEL will lead to a return - * value of -1. - * - * - Non-spacing and enclosing combining characters (general - * category code Mn or Me in the Unicode database) have a - * column width of 0. - * - * - SOFT HYPHEN (U+00AD) has a column width of 1. - * - * - Other format characters (general category code Cf in the Unicode - * database) and ZERO WIDTH SPACE (U+200B) have a column width of 0. - * - * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) - * have a column width of 0. - * - * - Spacing characters in the East Asian Wide (W) or East Asian - * Full-width (F) category as defined in Unicode Technical - * Report #11 have a column width of 2. - * - * - All remaining characters (including all printable - * ISO 8859-1 and WGL4 characters, Unicode control characters, - * etc.) have a column width of 1. - * - * This implementation assumes that wchar_t characters are encoded - * in ISO 10646. 
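
The rules just listed pin down concrete return values, so a few spot checks make the contract of mk_wcwidth() explicit. A sketch, with the function declared locally since the deleted tree keeps its declaration in a separate header; code points chosen purely for illustration:

    #include <cassert>

    namespace linenoise_ng { int mk_wcwidth(char32_t ucs); }  // defined below

    void wcwidthSpotChecks() {
        assert(linenoise_ng::mk_wcwidth(U'\0')  ==  0);  // NUL: zero columns
        assert(linenoise_ng::mk_wcwidth(0x0007) == -1);  // other C0 controls: error value
        assert(linenoise_ng::mk_wcwidth(U'A')   ==  1);  // ordinary Latin letter: one cell
        assert(linenoise_ng::mk_wcwidth(0x0301) ==  0);  // combining acute accent (Mn): zero cells
        assert(linenoise_ng::mk_wcwidth(0xAC00) ==  2);  // Hangul syllable, East Asian Wide: two cells
    }
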
- */ - -int mk_wcwidth(char32_t ucs) -{ - /* sorted list of non-overlapping intervals of non-spacing characters */ - /* generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c" */ - static const struct interval combining[] = { - { 0x0300, 0x036F }, { 0x0483, 0x0486 }, { 0x0488, 0x0489 }, - { 0x0591, 0x05BD }, { 0x05BF, 0x05BF }, { 0x05C1, 0x05C2 }, - { 0x05C4, 0x05C5 }, { 0x05C7, 0x05C7 }, { 0x0600, 0x0603 }, - { 0x0610, 0x0615 }, { 0x064B, 0x065E }, { 0x0670, 0x0670 }, - { 0x06D6, 0x06E4 }, { 0x06E7, 0x06E8 }, { 0x06EA, 0x06ED }, - { 0x070F, 0x070F }, { 0x0711, 0x0711 }, { 0x0730, 0x074A }, - { 0x07A6, 0x07B0 }, { 0x07EB, 0x07F3 }, { 0x0901, 0x0902 }, - { 0x093C, 0x093C }, { 0x0941, 0x0948 }, { 0x094D, 0x094D }, - { 0x0951, 0x0954 }, { 0x0962, 0x0963 }, { 0x0981, 0x0981 }, - { 0x09BC, 0x09BC }, { 0x09C1, 0x09C4 }, { 0x09CD, 0x09CD }, - { 0x09E2, 0x09E3 }, { 0x0A01, 0x0A02 }, { 0x0A3C, 0x0A3C }, - { 0x0A41, 0x0A42 }, { 0x0A47, 0x0A48 }, { 0x0A4B, 0x0A4D }, - { 0x0A70, 0x0A71 }, { 0x0A81, 0x0A82 }, { 0x0ABC, 0x0ABC }, - { 0x0AC1, 0x0AC5 }, { 0x0AC7, 0x0AC8 }, { 0x0ACD, 0x0ACD }, - { 0x0AE2, 0x0AE3 }, { 0x0B01, 0x0B01 }, { 0x0B3C, 0x0B3C }, - { 0x0B3F, 0x0B3F }, { 0x0B41, 0x0B43 }, { 0x0B4D, 0x0B4D }, - { 0x0B56, 0x0B56 }, { 0x0B82, 0x0B82 }, { 0x0BC0, 0x0BC0 }, - { 0x0BCD, 0x0BCD }, { 0x0C3E, 0x0C40 }, { 0x0C46, 0x0C48 }, - { 0x0C4A, 0x0C4D }, { 0x0C55, 0x0C56 }, { 0x0CBC, 0x0CBC }, - { 0x0CBF, 0x0CBF }, { 0x0CC6, 0x0CC6 }, { 0x0CCC, 0x0CCD }, - { 0x0CE2, 0x0CE3 }, { 0x0D41, 0x0D43 }, { 0x0D4D, 0x0D4D }, - { 0x0DCA, 0x0DCA }, { 0x0DD2, 0x0DD4 }, { 0x0DD6, 0x0DD6 }, - { 0x0E31, 0x0E31 }, { 0x0E34, 0x0E3A }, { 0x0E47, 0x0E4E }, - { 0x0EB1, 0x0EB1 }, { 0x0EB4, 0x0EB9 }, { 0x0EBB, 0x0EBC }, - { 0x0EC8, 0x0ECD }, { 0x0F18, 0x0F19 }, { 0x0F35, 0x0F35 }, - { 0x0F37, 0x0F37 }, { 0x0F39, 0x0F39 }, { 0x0F71, 0x0F7E }, - { 0x0F80, 0x0F84 }, { 0x0F86, 0x0F87 }, { 0x0F90, 0x0F97 }, - { 0x0F99, 0x0FBC }, { 0x0FC6, 0x0FC6 }, { 0x102D, 0x1030 }, - { 0x1032, 0x1032 }, { 0x1036, 0x1037 }, { 0x1039, 0x1039 }, - { 0x1058, 0x1059 }, { 0x1160, 0x11FF }, { 0x135F, 0x135F }, - { 0x1712, 0x1714 }, { 0x1732, 0x1734 }, { 0x1752, 0x1753 }, - { 0x1772, 0x1773 }, { 0x17B4, 0x17B5 }, { 0x17B7, 0x17BD }, - { 0x17C6, 0x17C6 }, { 0x17C9, 0x17D3 }, { 0x17DD, 0x17DD }, - { 0x180B, 0x180D }, { 0x18A9, 0x18A9 }, { 0x1920, 0x1922 }, - { 0x1927, 0x1928 }, { 0x1932, 0x1932 }, { 0x1939, 0x193B }, - { 0x1A17, 0x1A18 }, { 0x1B00, 0x1B03 }, { 0x1B34, 0x1B34 }, - { 0x1B36, 0x1B3A }, { 0x1B3C, 0x1B3C }, { 0x1B42, 0x1B42 }, - { 0x1B6B, 0x1B73 }, { 0x1DC0, 0x1DCA }, { 0x1DFE, 0x1DFF }, - { 0x200B, 0x200F }, { 0x202A, 0x202E }, { 0x2060, 0x2063 }, - { 0x206A, 0x206F }, { 0x20D0, 0x20EF }, { 0x302A, 0x302F }, - { 0x3099, 0x309A }, { 0xA806, 0xA806 }, { 0xA80B, 0xA80B }, - { 0xA825, 0xA826 }, { 0xFB1E, 0xFB1E }, { 0xFE00, 0xFE0F }, - { 0xFE20, 0xFE23 }, { 0xFEFF, 0xFEFF }, { 0xFFF9, 0xFFFB }, - { 0x10A01, 0x10A03 }, { 0x10A05, 0x10A06 }, { 0x10A0C, 0x10A0F }, - { 0x10A38, 0x10A3A }, { 0x10A3F, 0x10A3F }, { 0x1D167, 0x1D169 }, - { 0x1D173, 0x1D182 }, { 0x1D185, 0x1D18B }, { 0x1D1AA, 0x1D1AD }, - { 0x1D242, 0x1D244 }, { 0xE0001, 0xE0001 }, { 0xE0020, 0xE007F }, - { 0xE0100, 0xE01EF } - }; - - /* test for 8-bit control characters */ - if (ucs == 0) - return 0; - if (ucs < 32 || (ucs >= 0x7f && ucs < 0xa0)) - return -1; - - /* binary search in table of non-spacing characters */ - if (bisearch(ucs, combining, - sizeof(combining) / sizeof(struct interval) - 1)) - return 0; - - /* if we arrive here, ucs is not a combining or C0/C1 
control character */ - - return 1 + - (ucs >= 0x1100 && - (ucs <= 0x115f || /* Hangul Jamo init. consonants */ - ucs == 0x2329 || ucs == 0x232a || - (ucs >= 0x2e80 && ucs <= 0xa4cf && - ucs != 0x303f) || /* CJK ... Yi */ - (ucs >= 0xac00 && ucs <= 0xd7a3) || /* Hangul Syllables */ - (ucs >= 0xf900 && ucs <= 0xfaff) || /* CJK Compatibility Ideographs */ - (ucs >= 0xfe10 && ucs <= 0xfe19) || /* Vertical forms */ - (ucs >= 0xfe30 && ucs <= 0xfe6f) || /* CJK Compatibility Forms */ - (ucs >= 0xff00 && ucs <= 0xff60) || /* Fullwidth Forms */ - (ucs >= 0xffe0 && ucs <= 0xffe6) || - (ucs >= 0x20000 && ucs <= 0x2fffd) || - (ucs >= 0x30000 && ucs <= 0x3fffd))); -} - - -int mk_wcswidth(const char32_t* pwcs, size_t n) -{ - int w, width = 0; - - for (;*pwcs && n-- > 0; pwcs++) - if ((w = mk_wcwidth(*pwcs)) < 0) - return -1; - else - width += w; - - return width; -} - - -/* - * The following functions are the same as mk_wcwidth() and - * mk_wcswidth(), except that spacing characters in the East Asian - * Ambiguous (A) category as defined in Unicode Technical Report #11 - * have a column width of 2. This variant might be useful for users of - * CJK legacy encodings who want to migrate to UCS without changing - * the traditional terminal character-width behaviour. It is not - * otherwise recommended for general use. - */ -int mk_wcwidth_cjk(wchar_t ucs) -{ - /* sorted list of non-overlapping intervals of East Asian Ambiguous - * characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c" */ - static const struct interval ambiguous[] = { - { 0x00A1, 0x00A1 }, { 0x00A4, 0x00A4 }, { 0x00A7, 0x00A8 }, - { 0x00AA, 0x00AA }, { 0x00AE, 0x00AE }, { 0x00B0, 0x00B4 }, - { 0x00B6, 0x00BA }, { 0x00BC, 0x00BF }, { 0x00C6, 0x00C6 }, - { 0x00D0, 0x00D0 }, { 0x00D7, 0x00D8 }, { 0x00DE, 0x00E1 }, - { 0x00E6, 0x00E6 }, { 0x00E8, 0x00EA }, { 0x00EC, 0x00ED }, - { 0x00F0, 0x00F0 }, { 0x00F2, 0x00F3 }, { 0x00F7, 0x00FA }, - { 0x00FC, 0x00FC }, { 0x00FE, 0x00FE }, { 0x0101, 0x0101 }, - { 0x0111, 0x0111 }, { 0x0113, 0x0113 }, { 0x011B, 0x011B }, - { 0x0126, 0x0127 }, { 0x012B, 0x012B }, { 0x0131, 0x0133 }, - { 0x0138, 0x0138 }, { 0x013F, 0x0142 }, { 0x0144, 0x0144 }, - { 0x0148, 0x014B }, { 0x014D, 0x014D }, { 0x0152, 0x0153 }, - { 0x0166, 0x0167 }, { 0x016B, 0x016B }, { 0x01CE, 0x01CE }, - { 0x01D0, 0x01D0 }, { 0x01D2, 0x01D2 }, { 0x01D4, 0x01D4 }, - { 0x01D6, 0x01D6 }, { 0x01D8, 0x01D8 }, { 0x01DA, 0x01DA }, - { 0x01DC, 0x01DC }, { 0x0251, 0x0251 }, { 0x0261, 0x0261 }, - { 0x02C4, 0x02C4 }, { 0x02C7, 0x02C7 }, { 0x02C9, 0x02CB }, - { 0x02CD, 0x02CD }, { 0x02D0, 0x02D0 }, { 0x02D8, 0x02DB }, - { 0x02DD, 0x02DD }, { 0x02DF, 0x02DF }, { 0x0391, 0x03A1 }, - { 0x03A3, 0x03A9 }, { 0x03B1, 0x03C1 }, { 0x03C3, 0x03C9 }, - { 0x0401, 0x0401 }, { 0x0410, 0x044F }, { 0x0451, 0x0451 }, - { 0x2010, 0x2010 }, { 0x2013, 0x2016 }, { 0x2018, 0x2019 }, - { 0x201C, 0x201D }, { 0x2020, 0x2022 }, { 0x2024, 0x2027 }, - { 0x2030, 0x2030 }, { 0x2032, 0x2033 }, { 0x2035, 0x2035 }, - { 0x203B, 0x203B }, { 0x203E, 0x203E }, { 0x2074, 0x2074 }, - { 0x207F, 0x207F }, { 0x2081, 0x2084 }, { 0x20AC, 0x20AC }, - { 0x2103, 0x2103 }, { 0x2105, 0x2105 }, { 0x2109, 0x2109 }, - { 0x2113, 0x2113 }, { 0x2116, 0x2116 }, { 0x2121, 0x2122 }, - { 0x2126, 0x2126 }, { 0x212B, 0x212B }, { 0x2153, 0x2154 }, - { 0x215B, 0x215E }, { 0x2160, 0x216B }, { 0x2170, 0x2179 }, - { 0x2190, 0x2199 }, { 0x21B8, 0x21B9 }, { 0x21D2, 0x21D2 }, - { 0x21D4, 0x21D4 }, { 0x21E7, 0x21E7 }, { 0x2200, 0x2200 }, - { 0x2202, 0x2203 }, { 0x2207, 0x2208 }, { 0x220B, 0x220B }, - { 0x220F, 
0x220F }, { 0x2211, 0x2211 }, { 0x2215, 0x2215 }, - { 0x221A, 0x221A }, { 0x221D, 0x2220 }, { 0x2223, 0x2223 }, - { 0x2225, 0x2225 }, { 0x2227, 0x222C }, { 0x222E, 0x222E }, - { 0x2234, 0x2237 }, { 0x223C, 0x223D }, { 0x2248, 0x2248 }, - { 0x224C, 0x224C }, { 0x2252, 0x2252 }, { 0x2260, 0x2261 }, - { 0x2264, 0x2267 }, { 0x226A, 0x226B }, { 0x226E, 0x226F }, - { 0x2282, 0x2283 }, { 0x2286, 0x2287 }, { 0x2295, 0x2295 }, - { 0x2299, 0x2299 }, { 0x22A5, 0x22A5 }, { 0x22BF, 0x22BF }, - { 0x2312, 0x2312 }, { 0x2460, 0x24E9 }, { 0x24EB, 0x254B }, - { 0x2550, 0x2573 }, { 0x2580, 0x258F }, { 0x2592, 0x2595 }, - { 0x25A0, 0x25A1 }, { 0x25A3, 0x25A9 }, { 0x25B2, 0x25B3 }, - { 0x25B6, 0x25B7 }, { 0x25BC, 0x25BD }, { 0x25C0, 0x25C1 }, - { 0x25C6, 0x25C8 }, { 0x25CB, 0x25CB }, { 0x25CE, 0x25D1 }, - { 0x25E2, 0x25E5 }, { 0x25EF, 0x25EF }, { 0x2605, 0x2606 }, - { 0x2609, 0x2609 }, { 0x260E, 0x260F }, { 0x2614, 0x2615 }, - { 0x261C, 0x261C }, { 0x261E, 0x261E }, { 0x2640, 0x2640 }, - { 0x2642, 0x2642 }, { 0x2660, 0x2661 }, { 0x2663, 0x2665 }, - { 0x2667, 0x266A }, { 0x266C, 0x266D }, { 0x266F, 0x266F }, - { 0x273D, 0x273D }, { 0x2776, 0x277F }, { 0xE000, 0xF8FF }, - { 0xFFFD, 0xFFFD }, { 0xF0000, 0xFFFFD }, { 0x100000, 0x10FFFD } - }; - - /* binary search in table of non-spacing characters */ - if (bisearch(ucs, ambiguous, - sizeof(ambiguous) / sizeof(struct interval) - 1)) - return 2; - - return mk_wcwidth(ucs); -} - - -int mk_wcswidth_cjk(const wchar_t *pwcs, size_t n) -{ - int w, width = 0; - - for (;*pwcs && n-- > 0; pwcs++) - if ((w = mk_wcwidth_cjk(*pwcs)) < 0) - return -1; - else - width += w; - - return width; -} - -} diff --git a/src/nix-build/local.mk b/src/nix-build/local.mk deleted file mode 100644 index a2d1c91dfd9..00000000000 --- a/src/nix-build/local.mk +++ /dev/null @@ -1,9 +0,0 @@ -programs += nix-build - -nix-build_DIR := $(d) - -nix-build_SOURCES := $(d)/nix-build.cc - -nix-build_LIBS = libmain libexpr libstore libutil libformat - -$(eval $(call install-symlink, nix-build, $(bindir)/nix-shell)) diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index a63b3e07ae7..618895d387d 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -16,6 +16,7 @@ #include "get-drvs.hh" #include "common-eval-args.hh" #include "attr-path.hh" +#include "legacy.hh" using namespace nix; using namespace std::string_literals; @@ -66,11 +67,8 @@ std::vector shellwords(const string & s) return res; } -void mainWrapped(int argc, char * * argv) +static void _main(int argc, char * * argv) { - initNix(); - initGC(); - auto dryRun = false; auto runEnv = std::regex_search(argv[0], std::regex("nix-shell$")); auto pure = false; @@ -85,7 +83,6 @@ void mainWrapped(int argc, char * * argv) BuildMode buildMode = bmNormal; bool readStdin = false; - auto shell = getEnv("SHELL", "/bin/sh"); std::string envCommand; // interactive shell Strings envExclude; @@ -99,6 +96,9 @@ void mainWrapped(int argc, char * * argv) std::string outLink = "./result"; + // List of environment variables kept for --pure + std::set keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"}; + Strings args; for (int i = 1; i < argc; ++i) args.push_back(argv[i]); @@ -218,6 +218,9 @@ void mainWrapped(int argc, char * * argv) } } + else if (*arg == "--keep") + keepVars.insert(getArg(*arg, arg, end)); + else if (*arg == "-") readStdin = true; @@ -239,10 +242,10 @@ void mainWrapped(int argc, char * * argv) auto store = openStore(); - EvalState 
state(myArgs.searchPath, store); - state.repair = repair; + auto state = std::make_unique(myArgs.searchPath, store); + state->repair = repair; - Bindings & autoArgs = *myArgs.getAutoArgs(state); + Bindings & autoArgs = *myArgs.getAutoArgs(*state); if (packages) { std::ostringstream joined; @@ -268,7 +271,7 @@ void mainWrapped(int argc, char * * argv) std::vector exprs; if (readStdin) - exprs = {state.parseStdin()}; + exprs = {state->parseStdin()}; else for (auto i : left) { auto absolute = i; @@ -276,13 +279,13 @@ void mainWrapped(int argc, char * * argv) absolute = canonPath(absPath(i), true); } catch (Error e) {}; if (fromArgs) - exprs.push_back(state.parseExprFromString(i, absPath("."))); + exprs.push_back(state->parseExprFromString(i, absPath("."))); else if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?"))) - drvs.push_back(DrvInfo(state, store, absolute)); + drvs.push_back(DrvInfo(*state, store, absolute)); else /* If we're in a #! script, interpret filenames relative to the script. */ - exprs.push_back(state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, + exprs.push_back(state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, inShebang && !packages ? absPath(i, absPath(dirOf(script))) : i))))); } @@ -291,15 +294,17 @@ void mainWrapped(int argc, char * * argv) for (auto e : exprs) { Value vRoot; - state.eval(e, vRoot); + state->eval(e, vRoot); for (auto & i : attrPaths) { - Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot)); - state.forceValue(v); - getDerivations(state, v, "", autoArgs, drvs, false); + Value & v(*findAlongAttrPath(*state, i, autoArgs, vRoot)); + state->forceValue(v); + getDerivations(*state, v, "", autoArgs, drvs, false); } } + state->printStats(); + auto buildPaths = [&](const PathSet & paths) { /* Note: we do this even when !printMissing to efficiently fetch binary cache data. */ @@ -332,12 +337,12 @@ void mainWrapped(int argc, char * * argv) if (shell == "") { try { - auto expr = state.parseExprFromString("(import {}).bashInteractive", absPath(".")); + auto expr = state->parseExprFromString("(import {}).bashInteractive", absPath(".")); Value v; - state.eval(expr, v); + state->eval(expr, v); - auto drv = getDerivation(state, v, false); + auto drv = getDerivation(*state, v, false); if (!drv) throw Error("the 'bashInteractive' attribute in did not evaluate to a derivation"); @@ -354,7 +359,7 @@ void mainWrapped(int argc, char * * argv) // Build or fetch all dependencies of the derivation. 
for (const auto & input : drv.inputDrvs) if (std::all_of(envExclude.cbegin(), envExclude.cend(), [&](const string & exclude) { return !std::regex_search(input.first, std::regex(exclude)); })) - pathsToBuild.insert(input.first); + pathsToBuild.insert(makeDrvPathWithOutputs(input.first, input.second)); for (const auto & src : drv.inputSrcs) pathsToBuild.insert(src); @@ -368,7 +373,6 @@ void mainWrapped(int argc, char * * argv) auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp")); if (pure) { - std::set keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"}; decltype(env) newEnv; for (auto & i : env) if (keepVars.count(i.first)) @@ -411,17 +415,20 @@ void mainWrapped(int argc, char * * argv) "dontAddDisableDepTrack=1; " "[ -e $stdenv/setup ] && source $stdenv/setup; " "%3%" + "PATH=\"%4%:$PATH\"; " + "SHELL=%5%; " "set +e; " R"s([ -n "$PS1" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '; )s" "if [ \"$(type -t runHook)\" = function ]; then runHook shellHook; fi; " "unset NIX_ENFORCE_PURITY; " - "unset NIX_INDENT_MAKE; " "shopt -u nullglob; " - "unset TZ; %4%" - "%5%", + "unset TZ; %6%" + "%7%", (Path) tmpDir, (pure ? "" : "p=$PATH; "), (pure ? "" : "PATH=$PATH:$p; unset p; "), + dirOf(shell), + shell, (getenv("TZ") ? (string("export TZ='") + getenv("TZ") + "'; ") : ""), envCommand)); @@ -495,9 +502,5 @@ void mainWrapped(int argc, char * * argv) } } -int main(int argc, char * * argv) -{ - return handleExceptions(argv[0], [&]() { - return mainWrapped(argc, argv); - }); -} +static RegisterLegacyCommand s1("nix-build", _main); +static RegisterLegacyCommand s2("nix-shell", _main); diff --git a/src/nix-channel/local.mk b/src/nix-channel/local.mk deleted file mode 100644 index c14e8c359ca..00000000000 --- a/src/nix-channel/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-channel - -nix-channel_DIR := $(d) - -nix-channel_LIBS = libmain libformat libstore libutil - -nix-channel_SOURCES := $(d)/nix-channel.cc diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index d1b47ede803..8b66cc7e314 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -1,9 +1,11 @@ #include "shared.hh" #include "globals.hh" #include "download.hh" +#include "store-api.hh" +#include "legacy.hh" + #include #include -#include "store-api.hh" #include using namespace nix; @@ -86,7 +88,7 @@ static void update(const StringSet & channelNames) // definition from a consistent location if the redirect changes mid-download. std::string effectiveUrl; auto dl = getDownloader(); - auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl); + auto filename = dl->downloadCached(store, url, false, "", Hash(), &effectiveUrl, 0); url = chomp(std::move(effectiveUrl)); // If the URL contains a version number, append it to the name @@ -157,11 +159,9 @@ static void update(const StringSet & channelNames) replaceSymlink(profile, channelLink); } -int main(int argc, char ** argv) +static int _main(int argc, char ** argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - + { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); channelsList = home + "/.nix-channels"; @@ -169,7 +169,7 @@ int main(int argc, char ** argv) // Figure out the name of the channels profile. ; - auto pw = getpwuid(getuid()); + auto pw = getpwuid(geteuid()); std::string name = pw ? 
pw->pw_name : getEnv("USER", ""); if (name.empty()) throw Error("cannot figure out user name"); @@ -255,5 +255,9 @@ int main(int argc, char ** argv) runProgram(settings.nixBinDir + "/nix-env", false, envArgs); break; } - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-channel", _main); diff --git a/src/nix-collect-garbage/local.mk b/src/nix-collect-garbage/local.mk deleted file mode 100644 index 02d14cf6219..00000000000 --- a/src/nix-collect-garbage/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-collect-garbage - -nix-collect-garbage_DIR := $(d) - -nix-collect-garbage_SOURCES := $(d)/nix-collect-garbage.cc - -nix-collect-garbage_LIBS = libmain libstore libutil libformat diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index 37fe22f4813..d4060ac937f 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -2,6 +2,7 @@ #include "profiles.hh" #include "shared.hh" #include "globals.hh" +#include "legacy.hh" #include #include @@ -48,12 +49,10 @@ void removeOldGenerations(std::string dir) } } -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - bool removeOld = false; - - return handleExceptions(argv[0], [&]() { - initNix(); + { + bool removeOld = false; GCOptions options; @@ -90,5 +89,9 @@ int main(int argc, char * * argv) PrintFreed freed(true, results); store->collectGarbage(options, results); } - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-collect-garbage", _main); diff --git a/src/nix-copy-closure/local.mk b/src/nix-copy-closure/local.mk deleted file mode 100644 index 5018ab975b4..00000000000 --- a/src/nix-copy-closure/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-copy-closure - -nix-copy-closure_DIR := $(d) - -nix-copy-closure_LIBS = libmain libformat libstore libutil - -nix-copy-closure_SOURCES := $(d)/nix-copy-closure.cc diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index dfb1b8fc5dc..fdcde8b076b 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -1,13 +1,12 @@ #include "shared.hh" #include "store-api.hh" +#include "legacy.hh" using namespace nix; -int main(int argc, char ** argv) +static int _main(int argc, char ** argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - + { auto gzip = false; auto toMode = true; auto includeOutputs = false; @@ -61,5 +60,9 @@ int main(int argc, char ** argv) from->computeFSClosure(storePaths2, closure, false, includeOutputs); copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes); - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-copy-closure", _main); diff --git a/src/nix-daemon/local.mk b/src/nix-daemon/local.mk deleted file mode 100644 index 5a4474465b3..00000000000 --- a/src/nix-daemon/local.mk +++ /dev/null @@ -1,13 +0,0 @@ -programs += nix-daemon - -nix-daemon_DIR := $(d) - -nix-daemon_SOURCES := $(d)/nix-daemon.cc - -nix-daemon_LIBS = libmain libstore libutil libformat - -nix-daemon_LDFLAGS = -pthread - -ifeq ($(OS), SunOS) - nix-daemon_LDFLAGS += -lsocket -endif diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc index 35603af7082..8368c326614 100644 --- a/src/nix-daemon/nix-daemon.cc +++ b/src/nix-daemon/nix-daemon.cc @@ -9,6 +9,7 @@ #include "monitor-fd.hh" #include "derivations.hh" #include "finally.hh" +#include "legacy.hh" #include @@ -120,8 +121,6 @@ struct TunnelLogger : public 
Logger want to send out stderr to the client. */ void startWork() { - std::vector pendingMsgs; - auto state(state_.lock()); state->canSendStderr = true; @@ -197,7 +196,8 @@ struct TunnelSource : BufferedSource { Source & from; TunnelSource(Source & from) : from(from) { } - size_t readUnbuffered(unsigned char * data, size_t len) +protected: + size_t readUnbuffered(unsigned char * data, size_t len) override { to << STDERR_READ << len; to.flush(); @@ -234,7 +234,7 @@ struct RetrieveRegularNARSink : ParseSink }; -static void performOp(TunnelLogger * logger, ref store, +static void performOp(TunnelLogger * logger, ref store, bool trusted, unsigned int clientVersion, Source & from, Sink & to, unsigned int op) { @@ -363,7 +363,11 @@ static void performOp(TunnelLogger * logger, ref store, logger->startWork(); if (!savedRegular.regular) throw Error("regular file expected"); - Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo); + + auto store2 = store.dynamic_pointer_cast(); + if (!store2) throw Error("operation is only supported by LocalStore"); + + Path path = store2->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo); logger->stopWork(); to << path; @@ -554,7 +558,8 @@ static void performOp(TunnelLogger * logger, ref store, ; else if (trusted || name == settings.buildTimeout.name - || name == settings.connectTimeout.name) + || name == "connect-timeout" + || (name == "builders" && value == "")) settings.set(name, value); else if (setSubstituters(settings.substituters)) ; @@ -691,12 +696,23 @@ static void performOp(TunnelLogger * logger, ref store, if (!trusted) info.ultimate = false; - TeeSink tee(from); - parseDump(tee, tee.source); + std::string saved; + std::unique_ptr source; + if (GET_PROTOCOL_MINOR(clientVersion) >= 21) + source = std::make_unique(from); + else { + TeeSink tee(from); + parseDump(tee, tee.source); + saved = std::move(*tee.source.data); + source = std::make_unique(saved); + } logger->startWork(); - store.cast()->addToStore(info, tee.source.data, (RepairFlag) repair, + + // FIXME: race if addToStore doesn't read source? + store->addToStore(info, *source, (RepairFlag) repair, dontCheckSigs ? NoCheckSigs : CheckSigs, nullptr); + logger->stopWork(); break; } @@ -767,7 +783,7 @@ static void processConnection(bool trusted) Store::Params params; // FIXME: get params from somewhere // Disable caching since the client already does that. 
params["path-info-cache-size"] = "0"; - auto store = make_ref(params); + auto store = openStore(settings.storeUri, params); tunnelLogger->stopWork(); to.flush(); @@ -1043,11 +1059,9 @@ static void daemonLoop(char * * argv) } -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - + { auto stdio = false; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { @@ -1107,7 +1121,7 @@ int main(int argc, char * * argv) if (res == -1) throw SysError("splicing data from stdin to daemon socket"); else if (res == 0) - return; + return 0; } } } else { @@ -1116,5 +1130,9 @@ int main(int argc, char * * argv) } else { daemonLoop(argv); } - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-daemon", _main); diff --git a/src/nix-env/local.mk b/src/nix-env/local.mk deleted file mode 100644 index e80719cd76f..00000000000 --- a/src/nix-env/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-env - -nix-env_DIR := $(d) - -nix-env_SOURCES := $(wildcard $(d)/*.cc) - -nix-env_LIBS = libexpr libmain libstore libutil libformat diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 97e66cbd937..56ed75daee4 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -13,6 +13,7 @@ #include "json.hh" #include "value-to-json.hh" #include "xml-writer.hh" +#include "legacy.hh" #include #include @@ -150,10 +151,8 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v) if (stat(path.c_str(), &st) == -1) throw SysError(format("getting information about '%1%'") % path); - if (isNixExpr(path, st)) { + if (isNixExpr(path, st)) state.evalFile(path, v); - return; - } /* The path is a directory. Put the Nix expressions in the directory in a set, with the file name of each expression as @@ -161,13 +160,15 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v) set flat, not nested, to make it easier for a user to have a ~/.nix-defexpr directory that includes some system-wide directory). */ - if (S_ISDIR(st.st_mode)) { + else if (S_ISDIR(st.st_mode)) { state.mkAttrs(v, 1024); state.mkList(*state.allocAttr(v, state.symbols.create("_combineChannels")), 0); StringSet attrs; getAllExprs(state, path, attrs, v); v.attrs->sort(); } + + else throw Error("path '%s' is not a directory or a Nix expression", path); } @@ -198,13 +199,13 @@ static Path getDefNixExprPath() } -static int getPriority(EvalState & state, DrvInfo & drv) +static long getPriority(EvalState & state, DrvInfo & drv) { return drv.queryMetaInt("priority", 0); } -static int comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2) +static long comparePriorities(EvalState & state, DrvInfo & drv1, DrvInfo & drv2) { return getPriority(state, drv2) - getPriority(state, drv1); } @@ -270,7 +271,7 @@ static DrvInfos filterBySelector(EvalState & state, const DrvInfos & allElems, for (auto & j : matches) { DrvName drvName(j.first.queryName()); - int d = 1; + long d = 1; Newest::iterator k = newest.find(drvName.name); @@ -578,7 +579,7 @@ static void upgradeDerivations(Globals & globals, (upgradeType == utEq && d == 0) || upgradeType == utAlways) { - int d2 = -1; + long d2 = -1; if (bestElem != availElems.end()) { d2 = comparePriorities(*globals.state, *bestElem, *j); if (d2 == 0) d2 = compareVersions(bestVersion, newName.version); @@ -784,22 +785,22 @@ typedef list Table; void printTable(Table & table) { - unsigned int nrColumns = table.size() > 0 ? 
table.front().size() : 0; + auto nrColumns = table.size() > 0 ? table.front().size() : 0; - vector widths; + vector widths; widths.resize(nrColumns); for (auto & i : table) { assert(i.size() == nrColumns); Strings::iterator j; - unsigned int column; + size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) if (j->size() > widths[column]) widths[column] = j->size(); } for (auto & i : table) { Strings::iterator j; - unsigned int column; + size_t column; for (j = i.begin(), column = 0; j != i.end(); ++j, ++column) { string s = *j; replace(s.begin(), s.end(), '\n', ' '); @@ -1284,6 +1285,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr deleteOldGenerations(globals.profile, globals.dryRun); } else if (opArgs.size() == 1 && opArgs.front().find('d') != string::npos) { deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun); + } else if (opArgs.size() == 1 && opArgs.front().find('+') != string::npos) { + if(opArgs.front().size() < 2) + throw Error(format("invalid number of generations ‘%1%’") % opArgs.front()); + string str_max = string(opArgs.front(), 1, opArgs.front().size()); + int max; + if (!string2Int(str_max, max) || max == 0) + throw Error(format("invalid number of generations to keep ‘%1%’") % opArgs.front()); + deleteGenerationsGreaterThan(globals.profile, max, globals.dryRun); } else { std::set gens; for (auto & i : opArgs) { @@ -1303,12 +1312,9 @@ static void opVersion(Globals & globals, Strings opFlags, Strings opArgs) } -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - initGC(); - + { Strings opFlags, opArgs; Operation op = 0; RepairFlag repair = NoRepair; @@ -1420,5 +1426,9 @@ int main(int argc, char * * argv) op(globals, opFlags, opArgs); globals.state->printStats(); - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-env", _main); diff --git a/src/nix-instantiate/local.mk b/src/nix-instantiate/local.mk deleted file mode 100644 index 7d1bc5ec9df..00000000000 --- a/src/nix-instantiate/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-instantiate - -nix-instantiate_DIR := $(d) - -nix-instantiate_SOURCES := $(d)/nix-instantiate.cc - -nix-instantiate_LIBS = libexpr libmain libstore libutil libformat diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 5049460c754..a736caa8f05 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -9,6 +9,7 @@ #include "util.hh" #include "store-api.hh" #include "common-eval-args.hh" +#include "legacy.hh" #include #include @@ -83,12 +84,9 @@ void processExpr(EvalState & state, const Strings & attrPaths, } -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - initGC(); - + { Strings files; bool readStdin = false; bool fromArgs = false; @@ -158,37 +156,41 @@ int main(int argc, char * * argv) auto store = openStore(); - EvalState state(myArgs.searchPath, store); - state.repair = repair; + auto state = std::make_unique(myArgs.searchPath, store); + state->repair = repair; - Bindings & autoArgs = *myArgs.getAutoArgs(state); + Bindings & autoArgs = *myArgs.getAutoArgs(*state); if (attrPaths.empty()) attrPaths = {""}; if (findFile) { for (auto & i : files) { - Path p = state.findFile(i); + Path p = state->findFile(i); if (p == "") throw Error(format("unable to find '%1%'") % i); std::cout << p << std::endl; 
} - return; + return 0; } if (readStdin) { - Expr * e = state.parseStdin(); - processExpr(state, attrPaths, parseOnly, strict, autoArgs, + Expr * e = state->parseStdin(); + processExpr(*state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } else if (files.empty() && !fromArgs) files.push_back("./default.nix"); for (auto & i : files) { Expr * e = fromArgs - ? state.parseExprFromString(i, absPath(".")) - : state.parseExprFromFile(resolveExprPath(state.checkSourcePath(lookupFileArg(state, i)))); - processExpr(state, attrPaths, parseOnly, strict, autoArgs, + ? state->parseExprFromString(i, absPath(".")) + : state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, i)))); + processExpr(*state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); } - state.printStats(); - }); + state->printStats(); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-instantiate", _main); diff --git a/src/nix-prefetch-url/local.mk b/src/nix-prefetch-url/local.mk deleted file mode 100644 index 3e7735406af..00000000000 --- a/src/nix-prefetch-url/local.mk +++ /dev/null @@ -1,7 +0,0 @@ -programs += nix-prefetch-url - -nix-prefetch-url_DIR := $(d) - -nix-prefetch-url_SOURCES := $(d)/nix-prefetch-url.cc - -nix-prefetch-url_LIBS = libmain libexpr libstore libutil libformat diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index fa7ee254500..f54706a8a01 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -6,9 +6,16 @@ #include "eval-inline.hh" #include "common-eval-args.hh" #include "attr-path.hh" +#include "legacy.hh" +#include "finally.hh" +#include "progress-bar.hh" #include +#include +#include +#include + using namespace nix; @@ -40,12 +47,9 @@ string resolveMirrorUri(EvalState & state, string uri) } -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - initGC(); - + { HashType ht = htSHA256; std::vector args; bool printPath = getEnv("PRINT_PATH") != ""; @@ -94,10 +98,15 @@ int main(int argc, char * * argv) if (args.size() > 2) throw UsageError("too many arguments"); + Finally f([]() { stopProgressBar(); }); + + if (isatty(STDERR_FILENO)) + startProgressBar(); + auto store = openStore(); - EvalState state(myArgs.searchPath, store); + auto state = std::make_unique(myArgs.searchPath, store); - Bindings & autoArgs = *myArgs.getAutoArgs(state); + Bindings & autoArgs = *myArgs.getAutoArgs(*state); /* If -A is given, get the URI from the specified Nix expression. */ @@ -107,33 +116,33 @@ int main(int argc, char * * argv) throw UsageError("you must specify a URI"); uri = args[0]; } else { - Path path = resolveExprPath(lookupFileArg(state, args.empty() ? "." : args[0])); + Path path = resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0])); Value vRoot; - state.evalFile(path, vRoot); - Value & v(*findAlongAttrPath(state, attrPath, autoArgs, vRoot)); - state.forceAttrs(v); + state->evalFile(path, vRoot); + Value & v(*findAlongAttrPath(*state, attrPath, autoArgs, vRoot)); + state->forceAttrs(v); /* Extract the URI. 
*/ - auto attr = v.attrs->find(state.symbols.create("urls")); + auto attr = v.attrs->find(state->symbols.create("urls")); if (attr == v.attrs->end()) throw Error("attribute set does not contain a 'urls' attribute"); - state.forceList(*attr->value); + state->forceList(*attr->value); if (attr->value->listSize() < 1) throw Error("'urls' list is empty"); - uri = state.forceString(*attr->value->listElems()[0]); + uri = state->forceString(*attr->value->listElems()[0]); /* Extract the hash mode. */ - attr = v.attrs->find(state.symbols.create("outputHashMode")); + attr = v.attrs->find(state->symbols.create("outputHashMode")); if (attr == v.attrs->end()) printInfo("warning: this does not look like a fetchurl call"); else - unpack = state.forceString(*attr->value) == "recursive"; + unpack = state->forceString(*attr->value) == "recursive"; /* Extract the name. */ if (name.empty()) { - attr = v.attrs->find(state.symbols.create("name")); + attr = v.attrs->find(state->symbols.create("name")); if (attr != v.attrs->end()) - name = state.forceString(*attr->value); + name = state->forceString(*attr->value); } } @@ -158,16 +167,22 @@ int main(int argc, char * * argv) if (storePath.empty()) { - auto actualUri = resolveMirrorUri(state, uri); - - /* Download the file. */ - DownloadRequest req(actualUri); - req.decompress = false; - auto result = getDownloader()->download(req); + auto actualUri = resolveMirrorUri(*state, uri); AutoDelete tmpDir(createTempDir(), true); Path tmpFile = (Path) tmpDir + "/tmp"; - writeFile(tmpFile, *result.data); + + /* Download the file. */ + { + AutoCloseFD fd = open(tmpFile.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0600); + if (!fd) throw SysError("creating temporary file '%s'", tmpFile); + + FdSink sink(fd.get()); + + DownloadRequest req(actualUri); + req.decompress = false; + getDownloader()->download(std::move(req), sink); + } /* Optionally unpack the file. */ if (unpack) { @@ -191,7 +206,7 @@ int main(int argc, char * * argv) /* FIXME: inefficient; addToStore() will also hash this. */ - hash = unpack ? hashPath(ht, tmpFile).first : hashString(ht, *result.data); + hash = unpack ? 
hashPath(ht, tmpFile).first : hashFile(ht, tmpFile); if (expectedHash != Hash(ht) && expectedHash != hash) throw Error(format("hash mismatch for '%1%'") % uri); @@ -205,11 +220,17 @@ int main(int argc, char * * argv) assert(storePath == store->makeFixedOutputPath(unpack, hash, name)); } + stopProgressBar(); + if (!printPath) printInfo(format("path is '%1%'") % storePath); std::cout << printHash16or32(hash) << std::endl; if (printPath) std::cout << storePath << std::endl; - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-prefetch-url", _main); diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc index 51dedcf0a09..abdfa5e58f9 100644 --- a/src/nix-store/dotgraph.cc +++ b/src/nix-store/dotgraph.cc @@ -47,8 +47,7 @@ static string makeNode(const string & id, const string & label, static string symbolicName(const string & path) { string p = baseNameOf(path); - int dash = p.find('-'); - return string(p, dash + 1); + return string(p, p.find('-') + 1); } diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc new file mode 100644 index 00000000000..670fbe227a4 --- /dev/null +++ b/src/nix-store/graphml.cc @@ -0,0 +1,90 @@ +#include "graphml.hh" +#include "util.hh" +#include "store-api.hh" +#include "derivations.hh" + +#include + + +using std::cout; + +namespace nix { + + +static inline const string & xmlQuote(const string & s) +{ + // Luckily, store paths shouldn't contain any character that needs to be + // quoted. + return s; +} + + +static string symbolicName(const string & path) +{ + string p = baseNameOf(path); + return string(p, p.find('-') + 1); +} + + +static string makeEdge(const string & src, const string & dst) +{ + return fmt(" \n", + xmlQuote(src), xmlQuote(dst)); +} + + +static string makeNode(const ValidPathInfo & info) +{ + return fmt( + " \n" + " %2%\n" + " %3%\n" + " %4%\n" + " \n", + info.path, + info.narSize, + symbolicName(info.path), + (isDerivation(info.path) ? 
"derivation" : "output-path")); +} + + +void printGraphML(ref store, const PathSet & roots) +{ + PathSet workList(roots); + PathSet doneSet; + std::pair ret; + + cout << "\n" + << "\n" + << "" + << "" + << "" + << "\n"; + + while (!workList.empty()) { + Path path = *(workList.begin()); + workList.erase(path); + + ret = doneSet.insert(path); + if (ret.second == false) continue; + + ValidPathInfo info = *(store->queryPathInfo(path)); + cout << makeNode(info); + + for (auto & p : store->queryPathInfo(path)->references) { + if (p != path) { + workList.insert(p); + cout << makeEdge(path, p); + } + } + + } + + cout << "\n"; + cout << "\n"; +} + + +} diff --git a/src/nix-store/xmlgraph.hh b/src/nix-store/graphml.hh similarity index 53% rename from src/nix-store/xmlgraph.hh rename to src/nix-store/graphml.hh index a6e7d4e2805..b78df1e49a6 100644 --- a/src/nix-store/xmlgraph.hh +++ b/src/nix-store/graphml.hh @@ -6,6 +6,6 @@ namespace nix { class Store; -void printXmlGraph(ref store, const PathSet & roots); +void printGraphML(ref store, const PathSet & roots); } diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk deleted file mode 100644 index ade0b233adf..00000000000 --- a/src/nix-store/local.mk +++ /dev/null @@ -1,9 +0,0 @@ -programs += nix-store - -nix-store_DIR := $(d) - -nix-store_SOURCES := $(wildcard $(d)/*.cc) - -nix-store_LIBS = libmain libstore libutil libformat - -nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS) diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index e1e27ceef94..33138baff38 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -8,7 +8,8 @@ #include "shared.hh" #include "util.hh" #include "worker-protocol.hh" -#include "xmlgraph.hh" +#include "graphml.hh" +#include "legacy.hh" #include #include @@ -273,7 +274,7 @@ static void opQuery(Strings opFlags, Strings opArgs) enum QueryType { qDefault, qOutputs, qRequisites, qReferences, qReferrers , qReferrersClosure, qDeriver, qBinding, qHash, qSize - , qTree, qGraph, qXml, qResolve, qRoots }; + , qTree, qGraph, qGraphML, qResolve, qRoots }; QueryType query = qDefault; bool useOutput = false; bool includeOutputs = false; @@ -299,7 +300,7 @@ static void opQuery(Strings opFlags, Strings opArgs) else if (i == "--size") query = qSize; else if (i == "--tree") query = qTree; else if (i == "--graph") query = qGraph; - else if (i == "--xml") query = qXml; + else if (i == "--graphml") query = qGraphML; else if (i == "--resolve") query = qResolve; else if (i == "--roots") query = qRoots; else if (i == "--use-output" || i == "-u") useOutput = true; @@ -403,13 +404,13 @@ static void opQuery(Strings opFlags, Strings opArgs) break; } - case qXml: { + case qGraphML: { PathSet roots; for (auto & i : opArgs) { PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise); roots.insert(paths.begin(), paths.end()); } - printXmlGraph(ref(store), roots); + printGraphML(ref(store), roots); break; } @@ -484,11 +485,16 @@ static void opReadLog(Strings opFlags, Strings opArgs) static void opDumpDB(Strings opFlags, Strings opArgs) { if (!opFlags.empty()) throw UsageError("unknown flag"); - if (!opArgs.empty()) - throw UsageError("no arguments expected"); - PathSet validPaths = store->queryAllValidPaths(); - for (auto & i : validPaths) - cout << store->makeValidityRegistration({i}, true, true); + if (!opArgs.empty()) { + for (auto & i : opArgs) + i = store->followLinksToStorePath(i); + for (auto & i : opArgs) + cout << store->makeValidityRegistration({i}, true, true); + } else 
{ + PathSet validPaths = store->queryAllValidPaths(); + for (auto & i : validPaths) + cout << store->makeValidityRegistration({i}, true, true); + } } @@ -860,7 +866,7 @@ static void opServe(Strings opFlags, Strings opArgs) } case cmdDumpStorePath: - dumpPath(readStorePath(*store, in), out); + store->narFromPath(readStorePath(*store, in), out); break; case cmdImportPaths: { @@ -924,6 +930,28 @@ static void opServe(Strings opFlags, Strings opArgs) break; } + case cmdAddToStoreNar: { + if (!writeAllowed) throw Error("importing paths is not allowed"); + + ValidPathInfo info; + info.path = readStorePath(*store, in); + in >> info.deriver; + if (!info.deriver.empty()) + store->assertStorePath(info.deriver); + info.narHash = Hash(readString(in), htSHA256); + info.references = readStorePaths(*store, in); + in >> info.registrationTime >> info.narSize >> info.ultimate; + info.sigs = readStrings(in); + in >> info.ca; + + // FIXME: race if addToStore doesn't read source? + store->addToStore(info, in, NoRepair, NoCheckSigs); + + out << 1; // indicate success + + break; + } + default: throw Error(format("unknown serve command %1%") % cmd); } @@ -971,11 +999,9 @@ static void opVersion(Strings opFlags, Strings opArgs) /* Scan the arguments; find the operation, set global flags, put all other flags in a list, and put all other arguments in another list. */ -int main(int argc, char * * argv) +static int _main(int argc, char * * argv) { - return handleExceptions(argv[0], [&]() { - initNix(); - + { Strings opFlags, opArgs; Operation op = 0; @@ -1062,5 +1088,9 @@ int main(int argc, char * * argv) store = openStore(); op(opFlags, opArgs); - }); + + return 0; + } } + +static RegisterLegacyCommand s1("nix-store", _main); diff --git a/src/nix-store/xmlgraph.cc b/src/nix-store/xmlgraph.cc deleted file mode 100644 index 0f7be7f7a02..00000000000 --- a/src/nix-store/xmlgraph.cc +++ /dev/null @@ -1,66 +0,0 @@ -#include "xmlgraph.hh" -#include "util.hh" -#include "store-api.hh" - -#include - - -using std::cout; - -namespace nix { - - -static inline const string & xmlQuote(const string & s) -{ - // Luckily, store paths shouldn't contain any character that needs to be - // quoted. 
- return s; -} - - -static string makeEdge(const string & src, const string & dst) -{ - format f = format(" \n") - % xmlQuote(src) % xmlQuote(dst); - return f.str(); -} - - -static string makeNode(const string & id) -{ - format f = format(" \n") % xmlQuote(id); - return f.str(); -} - - -void printXmlGraph(ref store, const PathSet & roots) -{ - PathSet workList(roots); - PathSet doneSet; - - cout << "\n" - << "\n"; - - while (!workList.empty()) { - Path path = *(workList.begin()); - workList.erase(path); - - if (doneSet.find(path) != doneSet.end()) continue; - doneSet.insert(path); - - cout << makeNode(path); - - for (auto & p : store->queryPathInfo(path)->references) { - if (p != path) { - workList.insert(p); - cout << makeEdge(p, path); - } - } - - } - - cout << "\n"; -} - - -} diff --git a/src/nix/copy.cc b/src/nix/copy.cc index e4e6c3e303e..96bd453d87b 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -69,8 +69,12 @@ struct CmdCopy : StorePathsCommand }, #ifdef ENABLE_S3 Example{ - "To populate the current folder build output to a S3 binary cache:", - "nix copy --to s3://my-bucket?region=eu-west-1" + "To copy Hello to an S3 binary cache:", + "nix copy --to s3://my-bucket?region=eu-west-1 nixpkgs.hello" + }, + Example{ + "To copy Hello to an S3-compatible binary cache:", + "nix copy --to s3://my-bucket?region=eu-west-1&endpoint=example.com nixpkgs.hello" }, #endif }; diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc new file mode 100644 index 00000000000..7b544461947 --- /dev/null +++ b/src/nix/doctor.cc @@ -0,0 +1,124 @@ +#include "command.hh" +#include "serve-protocol.hh" +#include "shared.hh" +#include "store-api.hh" +#include "worker-protocol.hh" + +using namespace nix; + +std::string formatProtocol(unsigned int proto) +{ + if (proto) { + auto major = GET_PROTOCOL_MAJOR(proto) >> 8; + auto minor = GET_PROTOCOL_MINOR(proto); + return (format("%1%.%2%") % major % minor).str(); + } + return "unknown"; +} + +struct CmdDoctor : StoreCommand +{ + bool success = true; + + std::string name() override + { + return "doctor"; + } + + std::string description() override + { + return "check your system for potential problems"; + } + + void run(ref store) override + { + std::cout << "Store uri: " << store->getUri() << std::endl; + std::cout << std::endl; + + auto type = getStoreType(); + + if (type < tOther) { + success &= checkNixInPath(); + success &= checkProfileRoots(store); + } + success &= checkStoreProtocol(store->getProtocol()); + + if (!success) + throw Exit(2); + } + + bool checkNixInPath() + { + PathSet dirs; + + for (auto & dir : tokenizeString(getEnv("PATH"), ":")) + if (pathExists(dir + "/nix-env")) + dirs.insert(dirOf(canonPath(dir + "/nix-env", true))); + + if (dirs.size() != 1) { + std::cout << "Warning: multiple versions of nix found in PATH." 
<< std::endl; + std::cout << std::endl; + for (auto & dir : dirs) + std::cout << " " << dir << std::endl; + std::cout << std::endl; + return false; + } + + return true; + } + + bool checkProfileRoots(ref store) + { + PathSet dirs; + + for (auto & dir : tokenizeString(getEnv("PATH"), ":")) { + Path profileDir = dirOf(dir); + try { + Path userEnv = canonPath(profileDir, true); + + if (store->isStorePath(userEnv) && hasSuffix(userEnv, "user-environment")) { + while (profileDir.find("/profiles/") == std::string::npos && isLink(profileDir)) + profileDir = absPath(readLink(profileDir), dirOf(profileDir)); + + if (profileDir.find("/profiles/") == std::string::npos) + dirs.insert(dir); + } + } catch (SysError &) {} + } + + if (!dirs.empty()) { + std::cout << "Warning: found profiles outside of " << settings.nixStateDir << "/profiles." << std::endl; + std::cout << "The generation this profile points to might not have a gcroot and could be" << std::endl; + std::cout << "garbage collected, resulting in broken symlinks." << std::endl; + std::cout << std::endl; + for (auto & dir : dirs) + std::cout << " " << dir << std::endl; + std::cout << std::endl; + return false; + } + + return true; + } + + bool checkStoreProtocol(unsigned int storeProto) + { + unsigned int clientProto = GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == GET_PROTOCOL_MAJOR(storeProto) + ? SERVE_PROTOCOL_VERSION + : PROTOCOL_VERSION; + + if (clientProto != storeProto) { + std::cout << "Warning: protocol version of this client does not match the store." << std::endl; + std::cout << "While this is not necessarily a problem it's recommended to keep the client in" << std::endl; + std::cout << "sync with the daemon." << std::endl; + std::cout << std::endl; + std::cout << "Client protocol: " << formatProtocol(clientProto) << std::endl; + std::cout << "Store protocol: " << formatProtocol(storeProto) << std::endl; + std::cout << std::endl; + return false; + } + + return true; + } +}; + +static RegisterCommand r1(make_ref()); diff --git a/src/nix/hash.cc b/src/nix/hash.cc index 64062fb9795..af4105e2890 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -9,13 +9,14 @@ struct CmdHash : Command { enum Mode { mFile, mPath }; Mode mode; - Base base = Base16; + Base base = SRI; bool truncate = false; - HashType ht = htSHA512; + HashType ht = htSHA256; std::vector paths; CmdHash(Mode mode) : mode(mode) { + mkFlag(0, "sri", "print hash in SRI format", &base, SRI); mkFlag(0, "base64", "print hash in base-64", &base, Base64); mkFlag(0, "base32", "print hash in base-32 (Nix-specific)", &base, Base32); mkFlag(0, "base16", "print hash in base-16", &base, Base16); @@ -43,7 +44,7 @@ struct CmdHash : Command Hash h = mode == mFile ? hashFile(ht, path) : hashPath(ht, path).first; if (truncate && h.hashSize > 20) h = compressHash(h, 20); std::cout << format("%1%\n") % - h.to_string(base, false); + h.to_string(base, base == SRI); } } }; @@ -54,7 +55,7 @@ static RegisterCommand r2(make_ref(CmdHash::mPath)); struct CmdToBase : Command { Base base; - HashType ht = htSHA512; + HashType ht = htUnknown; std::vector args; CmdToBase(Base base) : base(base) @@ -70,26 +71,30 @@ struct CmdToBase : Command return base == Base16 ? "to-base16" : base == Base32 ? "to-base32" : - "to-base64"; + base == Base64 ? "to-base64" : + "to-sri"; } std::string description() override { - return fmt("convert a hash to base-%d representation", - base == Base16 ? 16 : - base == Base32 ? 32 : 64); + return fmt("convert a hash to %s representation", + base == Base16 ? 
"base-16" : + base == Base32 ? "base-32" : + base == Base64 ? "base-64" : + "SRI"); } void run() override { for (auto s : args) - std::cout << fmt("%s\n", Hash(s, ht).to_string(base, false)); + std::cout << fmt("%s\n", Hash(s, ht).to_string(base, base == SRI)); } }; static RegisterCommand r3(make_ref(Base16)); static RegisterCommand r4(make_ref(Base32)); static RegisterCommand r5(make_ref(Base64)); +static RegisterCommand r6(make_ref(SRI)); /* Legacy nix-hash command. */ static int compatNixHash(int argc, char * * argv) diff --git a/src/nix/installables.cc b/src/nix/installables.cc index a3fdd8a2808..0c1ad3ab3db 100644 --- a/src/nix/installables.cc +++ b/src/nix/installables.cc @@ -86,22 +86,6 @@ Buildable Installable::toBuildable() return std::move(buildables[0]); } -struct InstallableStoreDrv : Installable -{ - Path drvPath; - - InstallableStoreDrv(const Path & drvPath) : drvPath(drvPath) { } - - std::string what() override { return drvPath; } - - Buildables toBuildables() override - { - Buildable b = {drvPath}; - // FIXME: add outputs? - return {b}; - } -}; - struct InstallableStorePath : Installable { Path storePath; @@ -112,7 +96,7 @@ struct InstallableStorePath : Installable Buildables toBuildables() override { - return {{"", {{"out", storePath}}}}; + return {{isDerivation(storePath) ? storePath : "", {{"out", storePath}}}}; } }; @@ -226,12 +210,8 @@ static std::vector> parseInstallables( auto path = store->toStorePath(store->followLinksToStore(s)); - if (store->isStorePath(path)) { - if (isDerivation(path)) - result.push_back(std::make_shared(path)); - else - result.push_back(std::make_shared(path)); - } + if (store->isStorePath(path)) + result.push_back(std::make_shared(path)); } else if (s == "" || std::regex_match(s, attrPathRegex)) diff --git a/src/nix/local.mk b/src/nix/local.mk index f76da194467..ca4604d566c 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -2,10 +2,24 @@ programs += nix nix_DIR := $(d) -nix_SOURCES := $(wildcard $(d)/*.cc) $(wildcard src/linenoise/*.cpp) +nix_SOURCES := \ + $(wildcard $(d)/*.cc) \ + $(wildcard src/build-remote/*.cc) \ + $(wildcard src/nix-build/*.cc) \ + $(wildcard src/nix-channel/*.cc) \ + $(wildcard src/nix-collect-garbage/*.cc) \ + $(wildcard src/nix-copy-closure/*.cc) \ + $(wildcard src/nix-daemon/*.cc) \ + $(wildcard src/nix-env/*.cc) \ + $(wildcard src/nix-instantiate/*.cc) \ + $(wildcard src/nix-prefetch-url/*.cc) \ + $(wildcard src/nix-store/*.cc) \ -nix_LIBS = libexpr libmain libstore libutil libformat +nix_LIBS = libexpr libmain libstore libutil -nix_LDFLAGS = -pthread +nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) -$(eval $(call install-symlink, nix, $(bindir)/nix-hash)) +$(foreach name, \ + nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \ + $(eval $(call install-symlink, nix, $(bindir)/$(name)))) +$(eval $(call install-symlink, $(bindir)/nix, $(libexecdir)/nix/build-remote)) diff --git a/src/nix/ls.cc b/src/nix/ls.cc index e99622faf47..d089be42fb2 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -148,7 +148,7 @@ struct CmdLsNar : Command, MixLs void run() override { - list(makeNarAccessor(make_ref(readFile(narPath)))); + list(makeNarAccessor(make_ref(readFile(narPath, true)))); } }; diff --git a/src/nix/main.cc b/src/nix/main.cc index bb107ec7d3f..64c1dc35787 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -24,7 +24,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { mkFlag() .longName("help") - 
.shortName('h') .description("show usage information") .handler([&]() { showHelpAndExit(); }); @@ -34,9 +33,10 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .handler([&]() { std::cout << "The following configuration options are available:\n\n"; Table2 tbl; - for (const auto & s : settings._getSettings()) - if (!s.second.isAlias) - tbl.emplace_back(s.first, s.second.setting->description); + std::map settings; + globalConfig.getSettings(settings); + for (const auto & s : settings) + tbl.emplace_back(s.first, s.second.description); printTable(std::cout, tbl); throw Exit(); }); @@ -67,9 +67,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs void mainWrapped(int argc, char * * argv) { - verbosity = lvlError; - settings.verboseBuild = false; - /* The chroot helper needs to be run before any threads have been started. */ if (argc > 0 && argv[0] == chrootHelperName) { @@ -88,6 +85,9 @@ void mainWrapped(int argc, char * * argv) if (legacy) return legacy(argc, argv); } + verbosity = lvlError; + settings.verboseBuild = false; + NixArgs args; args.parseCmdline(argvToStrings(argc, argv)); diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index 47caa401d3c..dea5f0557b8 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -4,8 +4,8 @@ #include "json.hh" #include "common-args.hh" -#include #include +#include using namespace nix; @@ -13,12 +13,14 @@ struct CmdPathInfo : StorePathsCommand, MixJSON { bool showSize = false; bool showClosureSize = false; + bool humanReadable = false; bool showSigs = false; CmdPathInfo() { mkFlag('s', "size", "print size of the NAR dump of each path", &showSize); mkFlag('S', "closure-size", "print sum size of the NAR dumps of the closure of each path", &showClosureSize); + mkFlag('h', "human-readable", "with -s and -S, print sizes like 1K 234M 5.67G etc.", &humanReadable); mkFlag(0, "sigs", "show signatures", &showSigs); } @@ -39,6 +41,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON "To show the closure sizes of every path in the current NixOS system closure, sorted by size:", "nix path-info -rS /run/current-system | sort -nk2" }, + Example{ + "To show a package's closure size and all its dependencies with human readable sizes:", + "nix path-info -rsSh nixpkgs.rust" + }, Example{ "To check the existence of a path in a binary cache:", "nix path-info -r /nix/store/7qvk5c91...-geeqie-1.1 --store https://cache.nixos.org/" @@ -58,6 +64,25 @@ struct CmdPathInfo : StorePathsCommand, MixJSON }; } + void printSize(unsigned long long value) + { + if (!humanReadable) { + std::cout << fmt("\t%11d", value); + return; + } + + static const std::array idents{{ + ' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y' + }}; + size_t power = 0; + double res = value; + while (res > 1024 && power < idents.size()) { + ++power; + res /= 1024; + } + std::cout << fmt("\t%6.1f%c", res, idents.at(power)); + } + void run(ref store, Paths storePaths) override { size_t pathLen = 0; @@ -78,13 +103,16 @@ struct CmdPathInfo : StorePathsCommand, MixJSON auto info = store->queryPathInfo(storePath); storePath = info->path; // FIXME: screws up padding - std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' '); + std::cout << storePath; + + if (showSize || showClosureSize || showSigs) + std::cout << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' '); if (showSize) - std::cout << '\t' << std::setw(11) << info->narSize; + printSize(info->narSize); if (showClosureSize) - std::cout << '\t' << std::setw(11) << 
store->getClosureSize(storePath).first; + printSize(store->getClosureSize(storePath).first); if (showSigs) { std::cout << '\t'; diff --git a/src/nix/repl.cc b/src/nix/repl.cc index f84774a5336..227affc60e2 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -1,8 +1,17 @@ #include #include +#include +#include #include +#ifdef READLINE +#include +#include +#else +#include +#endif + #include "shared.hh" #include "eval.hh" #include "eval-inline.hh" @@ -15,8 +24,6 @@ #include "command.hh" #include "finally.hh" -#include "src/linenoise/linenoise.h" - namespace nix { #define ESC_RED "\033[31m" @@ -31,6 +38,7 @@ struct NixRepl { string curDir; EvalState state; + Bindings * autoArgs; Strings loadedFiles; @@ -117,17 +125,71 @@ NixRepl::NixRepl(const Strings & searchPath, nix::ref store) NixRepl::~NixRepl() { - linenoiseHistorySave(historyFile.c_str()); + write_history(historyFile.c_str()); } - static NixRepl * curRepl; // ugly -static void completionCallback(const char * s, linenoiseCompletions *lc) -{ - /* Otherwise, return all symbols that start with the prefix. */ - for (auto & c : curRepl->completePrefix(s)) - linenoiseAddCompletion(lc, c.c_str()); +static char * completionCallback(char * s, int *match) { + auto possible = curRepl->completePrefix(s); + if (possible.size() == 1) { + *match = 1; + auto *res = strdup(possible.begin()->c_str() + strlen(s)); + if (!res) throw Error("allocation failure"); + return res; + } else if (possible.size() > 1) { + auto checkAllHaveSameAt = [&](size_t pos) { + auto &first = *possible.begin(); + for (auto &p : possible) { + if (p.size() <= pos || p[pos] != first[pos]) + return false; + } + return true; + }; + size_t start = strlen(s); + size_t len = 0; + while (checkAllHaveSameAt(start + len)) ++len; + if (len > 0) { + *match = 1; + auto *res = strdup(std::string(*possible.begin(), start, len).c_str()); + if (!res) throw Error("allocation failure"); + return res; + } + } + + *match = 0; + return nullptr; +} + +static int listPossibleCallback(char *s, char ***avp) { + auto possible = curRepl->completePrefix(s); + + if (possible.size() > (INT_MAX / sizeof(char*))) + throw Error("too many completions"); + + int ac = 0; + char **vp = nullptr; + + auto check = [&](auto *p) { + if (!p) { + if (vp) { + while (--ac >= 0) + free(vp[ac]); + free(vp); + } + throw Error("allocation failure"); + } + return p; + }; + + vp = check((char **)malloc(possible.size() * sizeof(char*))); + + for (auto & p : possible) + vp[ac++] = check(strdup(p.c_str())); + + *avp = vp; + + return ac; } @@ -142,12 +204,18 @@ void NixRepl::mainLoop(const std::vector & files) reloadFiles(); if (!loadedFiles.empty()) std::cout << std::endl; + // Allow nix-repl specific settings in .inputrc + rl_readline_name = "nix-repl"; createDirs(dirOf(historyFile)); - linenoiseHistorySetMaxLen(1000); - linenoiseHistoryLoad(historyFile.c_str()); - +#ifndef READLINE + el_hist_size = 1000; +#endif + read_history(historyFile.c_str()); curRepl = this; - linenoiseSetCompletionCallback(completionCallback); +#ifndef READLINE + rl_set_complete_func(completionCallback); + rl_set_list_possib_func(listPossibleCallback); +#endif std::string input; @@ -175,7 +243,6 @@ void NixRepl::mainLoop(const std::vector & files) // We handled the current input fully, so we should clear it // and read brand new input. 
- linenoiseHistoryAdd(input.c_str()); input.clear(); std::cout << std::endl; } @@ -184,19 +251,10 @@ void NixRepl::mainLoop(const std::vector & files) bool NixRepl::getLine(string & input, const std::string &prompt) { - char * s = linenoise(prompt.c_str()); + char * s = readline(prompt.c_str()); Finally doFree([&]() { free(s); }); - if (!s) { - switch (auto type = linenoiseKeyType()) { - case 1: // ctrl-C - input = ""; - return true; - case 2: // ctrl-D - return false; - default: - throw Error(format("Unexpected linenoise keytype: %1%") % type); - } - } + if (!s) + return false; input += s; input += '\n'; return true; @@ -385,7 +443,7 @@ bool NixRepl::processLine(string line) /* We could do the build in this process using buildPaths(), but doing it in a child makes it easier to recover from problems / SIGINT. */ - if (runProgram(settings.nixBinDir + "/nix-store", Strings{"-r", drvPath}) == 0) { + if (runProgram(settings.nixBinDir + "/nix", Strings{"build", "--no-link", drvPath}) == 0) { Derivation drv = readDerivation(drvPath); std::cout << std::endl << "this derivation produced the following outputs:" << std::endl; for (auto & i : drv.outputs) @@ -441,8 +499,7 @@ void NixRepl::loadFile(const Path & path) loadedFiles.push_back(path); Value v, v2; state.evalFile(lookupFileArg(state, path), v); - Bindings & bindings(*state.allocBindings(0)); - state.autoCallFunction(bindings, v, v2); + state.autoCallFunction(*autoArgs, v, v2); addAttrsToScope(v2); } @@ -584,30 +641,13 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m for (auto & i : *v.attrs) sorted[i.name] = i.value; - /* If this is a derivation, then don't show the - self-references ("all", "out", etc.). */ - StringSet hidden; - if (isDrv) { - hidden.insert("all"); - Bindings::iterator i = v.attrs->find(state.sOutputs); - if (i == v.attrs->end()) - hidden.insert("out"); - else { - state.forceList(*i->value); - for (unsigned int j = 0; j < i->value->listSize(); ++j) - hidden.insert(state.forceStringNoCtx(*i->value->listElems()[j])); - } - } - for (auto & i : sorted) { if (isVarName(i.first)) str << i.first; else printStringValue(str, i.first.c_str()); str << " = "; - if (hidden.find(i.first) != hidden.end()) - str << "«...»"; - else if (seen.find(i.second) != seen.end()) + if (seen.find(i.second) != seen.end()) str << "«repeated»"; else try { @@ -693,8 +733,9 @@ struct CmdRepl : StoreCommand, MixEvalArgs void run(ref store) override { - NixRepl repl(searchPath, openStore()); - repl.mainLoop(files); + auto repl = std::make_unique(searchPath, openStore()); + repl->autoArgs = getAutoArgs(repl->state); + repl->mainLoop(files); } }; diff --git a/src/nix/run.cc b/src/nix/run.cc index d04e106e037..35b76334587 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -7,11 +7,14 @@ #include "finally.hh" #include "fs-accessor.hh" #include "progress-bar.hh" +#include "affinity.hh" #if __linux__ #include #endif +#include + using namespace nix; std::string chrootHelperName = "__run_in_chroot"; @@ -121,10 +124,27 @@ struct CmdRun : InstallablesCommand unsetenv(var.c_str()); } + std::unordered_set done; + std::queue todo; + for (auto & path : outPaths) todo.push(path); + auto unixPath = tokenizeString(getEnv("PATH"), ":"); - for (auto & path : outPaths) - if (accessor->stat(path + "/bin").type != FSAccessor::tMissing) + + while (!todo.empty()) { + Path path = todo.front(); + todo.pop(); + if (!done.insert(path).second) continue; + + if (true) unixPath.push_front(path + "/bin"); + + auto propPath = path + 
"/nix-support/propagated-user-env-packages"; + if (accessor->stat(propPath).type == FSAccessor::tRegular) { + for (auto & p : tokenizeString(readFile(propPath))) + todo.push(p); + } + } + setenv("PATH", concatStringsSep(":", unixPath).c_str(), 1); std::string cmd = *command.begin(); @@ -135,6 +155,8 @@ struct CmdRun : InstallablesCommand restoreSignals(); + restoreAffinity(); + /* If this is a diverted store (i.e. its "logical" location (typically /nix/store) differs from its "physical" location (e.g. /home/eelco/nix/store), then run the command in a diff --git a/src/nix/search.cc b/src/nix/search.cc index 53967669808..e086de2260a 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -7,19 +7,25 @@ #include "common-args.hh" #include "json.hh" #include "json-to-value.hh" +#include "shared.hh" #include #include using namespace nix; -std::string hilite(const std::string & s, const std::smatch & m) +std::string wrap(std::string prefix, std::string s) +{ + return prefix + s + ANSI_NORMAL; +} + +std::string hilite(const std::string & s, const std::smatch & m, std::string postfix) { return m.empty() ? s : std::string(m.prefix()) - + ANSI_RED + std::string(m.str()) + ANSI_NORMAL + + ANSI_RED + std::string(m.str()) + postfix + std::string(m.suffix()); } @@ -75,6 +81,10 @@ struct CmdSearch : SourceExprCommand, MixJSON "To search for git and frontend or gui:", "nix search git 'frontend|gui'" }, + Example{ + "To display the description of the found packages:", + "nix search git --verbose" + } }; } @@ -163,15 +173,13 @@ struct CmdSearch : SourceExprCommand, MixJSON jsonElem.attr("description", description); } else { + auto name = hilite(parsed.name, nameMatch, "\e[0;2m") + + std::string(parsed.fullName, parsed.name.length()); results[attrPath] = fmt( - "Attribute name: %s\n" - "Package name: %s\n" - "Version: %s\n" - "Description: %s\n", - hilite(attrPath, attrPathMatch), - hilite(name, nameMatch), - parsed.version, - hilite(description, descriptionMatch)); + "* %s (%s)\n %s\n", + wrap("\e[0;1m", hilite(attrPath, attrPathMatch, "\e[0;1m")), + wrap("\e[0;2m", hilite(name, nameMatch, "\e[0;2m")), + hilite(description, descriptionMatch, ANSI_NORMAL)); } } @@ -263,6 +271,10 @@ struct CmdSearch : SourceExprCommand, MixJSON throw SysError("cannot rename '%s' to '%s'", tmpFile, jsonCacheFileName); } + if (results.size() == 0) + throw Error("no results for the given search term(s)!"); + + RunPager pager; for (auto el : results) std::cout << el.second << "\n"; } diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc index c64b12c8dd6..86638b50d2c 100644 --- a/src/nix/show-config.cc +++ b/src/nix/show-config.cc @@ -27,10 +27,12 @@ struct CmdShowConfig : Command, MixJSON if (json) { // FIXME: use appropriate JSON types (bool, ints, etc). 
JSONObject jsonObj(std::cout); - settings.toJSON(jsonObj); + globalConfig.toJSON(jsonObj); } else { - for (auto & s : settings.getSettings()) - std::cout << s.first + " = " + s.second + "\n"; + std::map settings; + globalConfig.getSettings(settings); + for (auto & s : settings) + std::cout << s.first + " = " + s.second.value + "\n"; } } }; diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index 758bbbc688b..35c44a70cf5 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -1,14 +1,18 @@ #include "command.hh" +#include "common-args.hh" #include "store-api.hh" #include "download.hh" #include "eval.hh" #include "attr-path.hh" +#include "names.hh" +#include "progress-bar.hh" using namespace nix; -struct CmdUpgradeNix : StoreCommand +struct CmdUpgradeNix : MixDryRun, StoreCommand { Path profileDir; + std::string storePathsUrl = "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix"; CmdUpgradeNix() { @@ -18,6 +22,12 @@ struct CmdUpgradeNix : StoreCommand .labels({"profile-dir"}) .description("the Nix profile to upgrade") .dest(&profileDir); + + mkFlag() + .longName("nix-store-paths-url") + .labels({"url"}) + .description("URL of the file that contains the store paths of the latest Nix release") + .dest(&storePathsUrl); } std::string name() override @@ -46,7 +56,7 @@ struct CmdUpgradeNix : StoreCommand void run(ref store) override { - settings.pureEval = true; + evalSettings.pureEval = true; if (profileDir == "") profileDir = getProfileDir(store); @@ -59,6 +69,14 @@ struct CmdUpgradeNix : StoreCommand storePath = getLatestNix(store); } + auto version = DrvName(storePathToName(storePath)).version; + + if (dryRun) { + stopProgressBar(); + printError("would upgrade to version %s", version); + return; + } + { Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", storePath)); store->ensurePath(storePath); @@ -72,11 +90,15 @@ struct CmdUpgradeNix : StoreCommand throw Error("could not verify that '%s' works", program); } + stopProgressBar(); + { Activity act(*logger, lvlInfo, actUnknown, fmt("installing '%s' into profile '%s'...", storePath, profileDir)); runProgram(settings.nixBinDir + "/nix-env", false, {"--profile", profileDir, "-i", storePath, "--no-sandbox"}); } + + printError(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); } /* Return the profile in which Nix is installed. */ @@ -98,11 +120,18 @@ struct CmdUpgradeNix : StoreCommand if (hasPrefix(where, "/run/current-system")) throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - Path profileDir; - Path userEnv; + Path profileDir = dirOf(where); + + // Resolve profile to /nix/var/nix/profiles/ link. + while (canonPath(profileDir).find("/profiles/") == std::string::npos && isLink(profileDir)) + profileDir = readLink(profileDir); + + printInfo("found profile '%s'", profileDir); + + Path userEnv = canonPath(profileDir, true); if (baseNameOf(where) != "bin" || - !hasSuffix(userEnv = canonPath(profileDir = dirOf(where), true), "user-environment")) + !hasSuffix(userEnv, "user-environment")) throw Error("directory '%s' does not appear to be part of a Nix profile", where); if (!store->isValidPath(userEnv)) @@ -115,16 +144,16 @@ struct CmdUpgradeNix : StoreCommand Path getLatestNix(ref store) { // FIXME: use nixos.org? 
- auto req = DownloadRequest("https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix"); + auto req = DownloadRequest(storePathsUrl); auto res = getDownloader()->download(req); - EvalState state(Strings(), store); - auto v = state.allocValue(); - state.eval(state.parseExprFromString(*res.data, "/no-such-path"), *v); - Bindings & bindings(*state.allocBindings(0)); - auto v2 = findAlongAttrPath(state, settings.thisSystem, bindings, *v); + auto state = std::make_unique(Strings(), store); + auto v = state->allocValue(); + state->eval(state->parseExprFromString(*res.data, "/no-such-path"), *v); + Bindings & bindings(*state->allocBindings(0)); + auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v); - return state.forceString(*v2); + return state->forceString(*v2); } }; diff --git a/src/nix/verify.cc b/src/nix/verify.cc index 6540208a8a2..7ef571561a0 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -120,7 +120,7 @@ struct CmdVerify : StorePathsCommand for (auto sig : sigs) { if (sigsSeen.count(sig)) continue; sigsSeen.insert(sig); - if (info->checkSignature(publicKeys, sig)) + if (validSigs < ValidPathInfo::maxSigs && info->checkSignature(publicKeys, sig)) validSigs++; } }; diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc index 17e0595ae88..325a2be0a79 100644 --- a/src/nix/why-depends.cc +++ b/src/nix/why-depends.cc @@ -2,6 +2,7 @@ #include "store-api.hh" #include "progress-bar.hh" #include "fs-accessor.hh" +#include "shared.hh" #include @@ -237,6 +238,7 @@ struct CmdWhyDepends : SourceExprCommand visitPath(node.path); + RunPager pager; for (auto & ref : refs) { auto hash = storePathToHash(ref.second->path); diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp index 5b0b0ea5b30..c9af0bed36d 100644 --- a/src/nlohmann/json.hpp +++ b/src/nlohmann/json.hpp @@ -1,11 +1,12 @@ /* __ _____ _____ _____ __| | __| | | | JSON for Modern C++ -| | |__ | | | | | | version 3.0.1 +| | |__ | | | | | | version 3.5.0 |_____|_____|_____|_|___| https://github.com/nlohmann/json Licensed under the MIT License . -Copyright (c) 2013-2017 Niels Lohmann . +SPDX-License-Identifier: MIT +Copyright (c) 2013-2018 Niels Lohmann . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -29,42 +30,104 @@ SOFTWARE. 
#ifndef NLOHMANN_JSON_HPP #define NLOHMANN_JSON_HPP -#include // all_of, copy, fill, find, for_each, generate_n, none_of, remove, reverse, transform -#include // array +#define NLOHMANN_JSON_VERSION_MAJOR 3 +#define NLOHMANN_JSON_VERSION_MINOR 5 +#define NLOHMANN_JSON_VERSION_PATCH 0 + +#include // all_of, find, for_each #include // assert #include // and, not, or -#include // lconv, localeconv -#include // isfinite, labs, ldexp, signbit #include // nullptr_t, ptrdiff_t, size_t -#include // int64_t, uint64_t -#include // abort, strtod, strtof, strtold, strtoul, strtoll, strtoull -#include // memcpy, strlen -#include // forward_list -#include // function, hash, less +#include // hash, less #include // initializer_list -#include // hex -#include // istream, ostream -#include // advance, begin, back_inserter, bidirectional_iterator_tag, distance, end, inserter, iterator, iterator_traits, next, random_access_iterator_tag, reverse_iterator -#include // numeric_limits -#include // locale -#include // map -#include // addressof, allocator, allocator_traits, unique_ptr +#include // istream, ostream +#include // random_access_iterator_tag #include // accumulate -#include // stringstream -#include // getline, stoi, string, to_string -#include // add_pointer, conditional, decay, enable_if, false_type, integral_constant, is_arithmetic, is_base_of, is_const, is_constructible, is_convertible, is_default_constructible, is_enum, is_floating_point, is_integral, is_nothrow_move_assignable, is_nothrow_move_constructible, is_pointer, is_reference, is_same, is_scalar, is_signed, remove_const, remove_cv, remove_pointer, remove_reference, true_type, underlying_type -#include // declval, forward, make_pair, move, pair, swap -#include // valarray +#include // string, stoi, to_string +#include // declval, forward, move, pair, swap + +// #include +#ifndef NLOHMANN_JSON_FWD_HPP +#define NLOHMANN_JSON_FWD_HPP + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string #include // vector +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template +struct adl_serializer; + +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer> +class basic_json; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +template +class json_pointer; + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. 
+ +@since version 1.0.0 +*/ +using json = basic_json<>; +} // namespace nlohmann + +#endif + +// #include + + +// This file contains all internal macro definitions +// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them + // exclude unsupported compilers -#if defined(__clang__) - #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 - #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" - #endif -#elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40900 - #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" +#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) + #if defined(__clang__) + #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 + #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 + #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" + #endif #endif #endif @@ -90,14 +153,36 @@ SOFTWARE. #endif // allow to disable exceptions -#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && not defined(JSON_NOEXCEPTION) +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) #define JSON_THROW(exception) throw exception #define JSON_TRY try #define JSON_CATCH(exception) catch(exception) + #define JSON_INTERNAL_CATCH(exception) catch(exception) #else #define JSON_THROW(exception) std::abort() #define JSON_TRY if(true) #define JSON_CATCH(exception) if(false) + #define JSON_INTERNAL_CATCH(exception) if(false) +#endif + +// override exception macros +#if defined(JSON_THROW_USER) + #undef JSON_THROW + #define JSON_THROW JSON_THROW_USER +#endif +#if defined(JSON_TRY_USER) + #undef JSON_TRY + #define JSON_TRY JSON_TRY_USER +#endif +#if defined(JSON_CATCH_USER) + #undef JSON_CATCH + #define JSON_CATCH JSON_CATCH_USER + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_CATCH_USER +#endif +#if defined(JSON_INTERNAL_CATCH_USER) + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER #endif // manual branch prediction @@ -118,25 +203,35 @@ SOFTWARE. #endif /*! -@brief namespace for Niels Lohmann -@see https://github.com/nlohmann -@since version 1.0.0 +@brief macro to briefly define a mapping between an enum and JSON +@def NLOHMANN_JSON_SERIALIZE_ENUM +@since version 3.4.0 */ -namespace nlohmann -{ -template -struct adl_serializer; - -// forward declaration of basic_json (required to split the class) -template class ObjectType = std::map, - template class ArrayType = std::vector, - class StringType = std::string, class BooleanType = bool, - class NumberIntegerType = std::int64_t, - class NumberUnsignedType = std::uint64_t, - class NumberFloatType = double, - template class AllocatorType = std::allocator, - template class JSONSerializer = adl_serializer> -class basic_json; +#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) 
\ + template \ + inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [e](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.first == e; \ + }); \ + j = ((it != std::end(m)) ? it : std::begin(m))->second; \ + } \ + template \ + inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [j](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.second == j; \ + }); \ + e = ((it != std::end(m)) ? it : std::begin(m))->first; \ + } // Ugly macros to avoid uglier copy-paste when specializing basic_json. They // may be removed in the future once the class is split. @@ -154,5922 +249,10677 @@ class basic_json; NumberIntegerType, NumberUnsignedType, NumberFloatType, \ AllocatorType, JSONSerializer> +// #include -/*! -@brief unnamed namespace with internal helper functions -This namespace collects some functions that could not be defined inside the -@ref basic_json class. +#include // not +#include // size_t +#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type -@since version 2.1.0 -*/ +namespace nlohmann +{ namespace detail { -//////////////// -// exceptions // -//////////////// - -/*! -@brief general exception of the @ref basic_json class - -This class is an extension of `std::exception` objects with a member @a id for -exception ids. It is used as the base class for all exceptions thrown by the -@ref basic_json class. This class can hence be used as "wildcard" to catch -exceptions. - -Subclasses: -- @ref parse_error for exceptions indicating a parse error -- @ref invalid_iterator for exceptions indicating errors with iterators -- @ref type_error for exceptions indicating executing a member function with - a wrong type -- @ref out_of_range for exceptions indicating access out of the defined range -- @ref other_error for exceptions indicating other library errors - -@internal -@note To have nothrow-copy-constructible exceptions, we internally use - `std::runtime_error` which can cope with arbitrary-length error messages. - Intermediate strings are built with static functions and then passed to - the actual constructor. -@endinternal +// alias templates to reduce boilerplate +template +using enable_if_t = typename std::enable_if::type; -@liveexample{The following code shows how arbitrary library exceptions can be -caught.,exception} +template +using uncvref_t = typename std::remove_cv::type>::type; -@since version 3.0.0 -*/ -class exception : public std::exception +// implementation of C++14 index_sequence and affiliates +// source: https://stackoverflow.com/a/32223343 +template +struct index_sequence { - public: - /// returns the explanatory string - const char* what() const noexcept override + using type = index_sequence; + using value_type = std::size_t; + static constexpr std::size_t size() noexcept { - return m.what(); + return sizeof...(Ints); } +}; - /// the id of the exception - const int id; +template +struct merge_and_renumber; - protected: - exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} +template +struct merge_and_renumber, index_sequence> + : index_sequence < I1..., (sizeof...(I1) + I2)... 
> {}; - static std::string name(const std::string& ename, int id_) - { - return "[json.exception." + ename + "." + std::to_string(id_) + "] "; - } +template +struct make_index_sequence + : merge_and_renumber < typename make_index_sequence < N / 2 >::type, + typename make_index_sequence < N - N / 2 >::type > {}; - private: - /// an exception object as storage for error messages - std::runtime_error m; -}; +template<> struct make_index_sequence<0> : index_sequence<> {}; +template<> struct make_index_sequence<1> : index_sequence<0> {}; -/*! -@brief exception indicating a parse error +template +using index_sequence_for = make_index_sequence; -This exception is thrown by the library when a parse error occurs. Parse errors -can occur during the deserialization of JSON text, CBOR, MessagePack, as well -as when using JSON Patch. +// dispatch utility (taken from ranges-v3) +template struct priority_tag : priority_tag < N - 1 > {}; +template<> struct priority_tag<0> {}; -Member @a byte holds the byte index of the last read character in the input -file. +// taken from ranges-v3 +template +struct static_const +{ + static constexpr T value{}; +}; -Exceptions have ids 1xx. +template +constexpr T static_const::value; +} // namespace detail +} // namespace nlohmann -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. -json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. -json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. -json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. -json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. -json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. -json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. -json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. -json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. 
-json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. -json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. -json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. +// #include -@note For an input with n bytes, 1 is the index of the first character and n+1 - is the index of the terminating null byte or the end of file. This also - holds true when reading a byte vector (CBOR or MessagePack). -@liveexample{The following code shows how a `parse_error` exception can be -caught.,parse_error} +#include // not +#include // numeric_limits +#include // false_type, is_constructible, is_integral, is_same, true_type +#include // declval -@sa @ref exception for the base class of the library exceptions -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors +// #include -@since version 3.0.0 -*/ -class parse_error : public exception -{ - public: - /*! - @brief create a parse error exception - @param[in] id_ the id of the exception - @param[in] byte_ the byte index where the error occurred (or 0 if the - position cannot be determined) - @param[in] what_arg the explanatory string - @return parse_error object - */ - static parse_error create(int id_, std::size_t byte_, const std::string& what_arg) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - (byte_ != 0 ? (" at " + std::to_string(byte_)) : "") + - ": " + what_arg; - return parse_error(id_, byte_, w.c_str()); - } +// #include - /*! - @brief byte index of the parse error - The byte index of the last read character in the input file. +#include // random_access_iterator_tag - @note For an input with n bytes, 1 is the index of the first character and - n+1 is the index of the terminating null byte or the end of file. - This also holds true when reading a byte vector (CBOR or MessagePack). - */ - const std::size_t byte; +// #include - private: - parse_error(int id_, std::size_t byte_, const char* what_arg) - : exception(id_, what_arg), byte(byte_) {} + +namespace nlohmann +{ +namespace detail +{ +template struct make_void +{ + using type = void; }; +template using void_t = typename make_void::type; +} // namespace detail +} // namespace nlohmann -/*! -@brief exception indicating errors with iterators +// #include -This exception is thrown if iterators passed to a library function do not match -the expected semantics. -Exceptions have ids 2xx. 
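To make the exception taxonomy documented above concrete, here is a minimal, self-contained sketch of catching a parse error and reading its id and byte members; the include path is illustrative (adjust for the vendored copy) and the JSON literal is deliberately truncated:

    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        try {
            auto j = json::parse("{\"answer\": 42");   // truncated on purpose
        } catch (const json::parse_error & e) {
            // e.id is the numeric id from the table above (101 for a syntax
            // error); e.byte is the position of the last character read.
            std::cerr << "id=" << e.id << " byte=" << e.byte
                      << ": " << e.what() << "\n";
        }
    }

Catching nlohmann::json::exception instead of parse_error acts as the "wildcard" handler that the base-class documentation above describes.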
+namespace nlohmann +{ +namespace detail +{ +template +struct iterator_types {}; + +template +struct iterator_types < + It, + void_t> +{ + using difference_type = typename It::difference_type; + using value_type = typename It::value_type; + using pointer = typename It::pointer; + using reference = typename It::reference; + using iterator_category = typename It::iterator_category; +}; -name / id | example message | description ------------------------------------ | --------------- | ------------------------- -json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. -json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. -json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. -json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. -json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. -json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. -json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. -json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. 
-json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. -json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). - -@liveexample{The following code shows how an `invalid_iterator` exception can be -caught.,invalid_iterator} - -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors +// This is required as some compilers implement std::iterator_traits in a way that +// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. +template +struct iterator_traits +{ +}; -@since version 3.0.0 -*/ -class invalid_iterator : public exception +template +struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> + : iterator_types { - public: - static invalid_iterator create(int id_, const std::string& what_arg) - { - std::string w = exception::name("invalid_iterator", id_) + what_arg; - return invalid_iterator(id_, w.c_str()); - } +}; - private: - invalid_iterator(int id_, const char* what_arg) - : exception(id_, what_arg) {} +template +struct iterator_traits::value>> +{ + using iterator_category = std::random_access_iterator_tag; + using value_type = T; + using difference_type = ptrdiff_t; + using pointer = T*; + using reference = T&; }; +} +} -/*! -@brief exception indicating executing a member function with a wrong type +// #include -This exception is thrown in case of a type error; that is, a library function is -executed on a JSON value whose type does not match the expected semantics. +// #include -Exceptions have ids 3xx. -name / id | example message | description ------------------------------ | --------------- | ------------------------- -json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. -json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. -json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t&. -json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. -json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. -json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. 
-json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. -json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. -json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. -json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. -json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. -json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. -json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. -json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. -json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. -json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | +#include -@liveexample{The following code shows how a `type_error` exception can be -caught.,type_error} +// #include -@sa @ref exception for the base class of the library exceptions -@sa @ref parse_error for exceptions indicating a parse error -@sa @ref invalid_iterator for exceptions indicating errors with iterators -@sa @ref out_of_range for exceptions indicating access out of the defined range -@sa @ref other_error for exceptions indicating other library errors -@since version 3.0.0 -*/ -class type_error : public exception +// http://en.cppreference.com/w/cpp/experimental/is_detected +namespace nlohmann { - public: - static type_error create(int id_, const std::string& what_arg) - { - std::string w = exception::name("type_error", id_) + what_arg; - return type_error(id_, w.c_str()); - } +namespace detail +{ +struct nonesuch +{ + nonesuch() = delete; + ~nonesuch() = delete; + nonesuch(nonesuch const&) = delete; + void operator=(nonesuch const&) = delete; +}; - private: - type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +template class Op, + class... Args> +struct detector +{ + using value_t = std::false_type; + using type = Default; }; -/*! -@brief exception indicating access out of the defined range +template class Op, class... Args> +struct detector>, Op, Args...> +{ + using value_t = std::true_type; + using type = Op; +}; -This exception is thrown in case a library function is called on an input -parameter that exceeds the expected range, for instance in case of array -indices or nonexisting object keys. +template