summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVivien Kraus <vivien@planete-kraus.eu>2023-08-29 09:31:33 +0200
committerVivien Kraus <vivien@planete-kraus.eu>2023-08-30 22:32:29 +0200
commit547ff19e086d2a73a445cb7921f6f403fd6aab08 (patch)
tree8fc5ac72828d0aca0c59bd799c0e5cd648469410
Lexer and parser for json
-rw-r--r--.gitmodules3
-rw-r--r--.prev-version1
-rw-r--r--AUTHORS12
-rw-r--r--ChangeLog0
-rw-r--r--GNUmakefile127
-rw-r--r--Makefile.am17
-rw-r--r--NEWS9
-rw-r--r--README142
-rw-r--r--README-release101
-rwxr-xr-xautogen.sh39
-rwxr-xr-xautopull.sh37
-rwxr-xr-xbootstrap234
-rw-r--r--bootstrap-funclib.sh1302
-rw-r--r--bootstrap.conf74
-rw-r--r--cfg.mk12
-rw-r--r--configure.ac37
-rw-r--r--gen-bootstrap.sed10
-rwxr-xr-xgitsub.sh509
m---------gnulib0
-rw-r--r--json-elements/Makefile.am9
-rw-r--r--json-elements/false1
-rw-r--r--json-elements/json4
-rw-r--r--json-elements/jsonld38
-rw-r--r--json-elements/null1
-rw-r--r--json-elements/number1
-rw-r--r--json-elements/string1
-rw-r--r--json-elements/structure1
-rw-r--r--json-elements/true1
-rw-r--r--maint.mk1805
-rw-r--r--src/Makefile.am8
-rw-r--r--src/json-lexer.c282
-rw-r--r--src/liballoc/allocator.h141
-rw-r--r--src/libjson/neoas-json-lexer-and-parser.h551
-rw-r--r--src/libjson/neoas-json-lexer.h1036
-rw-r--r--src/libjson/neoas-json-parser.h462
35 files changed, 7008 insertions, 0 deletions
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..546e03f
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "gnulib"]
+ path = gnulib
+ url = https://git.savannah.gnu.org/git/gnulib.git
diff --git a/.prev-version b/.prev-version
new file mode 100644
index 0000000..7a2d820
--- /dev/null
+++ b/.prev-version
@@ -0,0 +1 @@
+NONE
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..a3be28f
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,12 @@
+#+title: NEO-AS authors
+#+language: en
+#+author: Vivien Kraus
+#+email: vivien@planete-kraus.eu
+
+List of authors:
+
+- Vivien Kraus [[mailto:vivien@planete-kraus.eu]]
+
+# Local Variables:
+# mode: org
+# End:
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/ChangeLog
diff --git a/GNUmakefile b/GNUmakefile
new file mode 100644
index 0000000..07b331f
--- /dev/null
+++ b/GNUmakefile
@@ -0,0 +1,127 @@
+# Having a separate GNUmakefile lets me 'include' the dynamically
+# generated rules created via cfg.mk (package-local configuration)
+# as well as maint.mk (generic maintainer rules).
+# This makefile is used only if you run GNU Make.
+# It is necessary if you want to build targets usually of interest
+# only to the maintainer.
+
+# Copyright (C) 2001, 2003, 2006-2023 Free Software Foundation, Inc.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# If the user runs GNU make but has not yet run ./configure,
+# give them a diagnostic.
+_gl-Makefile := $(wildcard [M]akefile)
+ifneq ($(_gl-Makefile),)
+
+# Make tar archive easier to reproduce.
+export TAR_OPTIONS = --owner=0 --group=0 --numeric-owner --sort=name
+
+# Allow the user to add to this in the Makefile.
+ALL_RECURSIVE_TARGETS =
+
+include Makefile
+
+# Some projects override e.g., _autoreconf here.
+-include $(srcdir)/cfg.mk
+
+# Allow cfg.mk to override these.
+_build-aux ?= build-aux
+_autoreconf ?= autoreconf -v
+
+include $(srcdir)/maint.mk
+
+# Ensure that $(VERSION) is up to date for dist-related targets, but not
+# for others: rerunning autoreconf and recompiling everything isn't cheap.
+_have-git-version-gen := \
+ $(shell test -f $(srcdir)/$(_build-aux)/git-version-gen && echo yes)
+ifeq ($(_have-git-version-gen)0,yes$(MAKELEVEL))
+ _is-dist-target ?= $(filter-out %clean, \
+ $(filter maintainer-% dist% alpha beta stable,$(MAKECMDGOALS)))
+ _is-install-target ?= $(filter-out %check, $(filter install%,$(MAKECMDGOALS)))
+ ifneq (,$(_is-dist-target)$(_is-install-target))
+ _curr-ver := $(shell cd $(srcdir) \
+ && $(_build-aux)/git-version-gen \
+ .tarball-version \
+ $(git-version-gen-tag-sed-script))
+ ifneq ($(_curr-ver),$(VERSION))
+ ifeq ($(_curr-ver),UNKNOWN)
+ $(info WARNING: unable to verify if $(VERSION) is the correct version)
+ else
+ ifneq (,$(_is-install-target))
+ # GNU Coding Standards state that 'make install' should not cause
+ # recompilation after 'make all'. But as long as changing the version
+ # string alters config.h, the cost of having 'make all' always have an
+ # up-to-date version is prohibitive. So, as a compromise, we merely
+ # warn when installing a version string that is out of date; the user
+ # should run 'autoreconf' (or something like 'make distcheck') to
+ # fix the version, 'make all' to propagate it, then 'make install'.
+ $(info WARNING: version string $(VERSION) is out of date;)
+ $(info run '$(MAKE) _version' to fix it)
+ else
+ $(info INFO: running autoreconf for new version string: $(_curr-ver))
+GNUmakefile: _version
+ touch GNUmakefile
+ endif
+ endif
+ endif
+ endif
+endif
+
+.PHONY: _version
+_version:
+ cd $(srcdir) && rm -rf autom4te.cache .version && $(_autoreconf)
+ $(MAKE) $(AM_MAKEFLAGS) Makefile
+
+else
+
+.DEFAULT_GOAL := abort-due-to-no-makefile
+srcdir = .
+
+# The package can override .DEFAULT_GOAL to run actions like autoreconf.
+-include ./cfg.mk
+
+# Allow cfg.mk to override these.
+_build-aux ?= build-aux
+_autoreconf ?= autoreconf -v
+
+include ./maint.mk
+
+ifeq ($(.DEFAULT_GOAL),abort-due-to-no-makefile)
+$(MAKECMDGOALS): abort-due-to-no-makefile
+endif
+
+abort-due-to-no-makefile:
+ @echo There seems to be no Makefile in this directory. 1>&2
+ @echo "You must run ./configure before running '$(MAKE)'." 1>&2
+ @exit 1
+
+endif
+
+# Tell version 3.79 and up of GNU make to not build goals in this
+# directory in parallel, in case someone tries to build multiple
+# targets, and one of them can cause a recursive target to be invoked.
+
+# Only set this if Automake doesn't provide it.
+AM_RECURSIVE_TARGETS ?= $(RECURSIVE_TARGETS:-recursive=) \
+ $(RECURSIVE_CLEAN_TARGETS:-recursive=) \
+ dist distcheck tags ctags
+
+ALL_RECURSIVE_TARGETS += $(AM_RECURSIVE_TARGETS)
+
+ifneq ($(word 2, $(MAKECMDGOALS)), )
+ifneq ($(filter $(ALL_RECURSIVE_TARGETS), $(MAKECMDGOALS)), )
+.NOTPARALLEL:
+endif
+endif
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..3202b91
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,17 @@
+SUBDIRS = lib
+ACLOCAL_AMFLAGS = -I m4
+EXTRA_DIST = m4/gnulib-cache.m4 \
+ $(top_srcdir)/.version
+
+MAINTAINERCLEANFILES = .version
+BUILT_SOURCES = $(top_srcdir)/.version
+
+$(top_srcdir)/.version:
+ $(AM_V_GEN) echo '$(VERSION)' > $@-t
+ @mv $@-t $@
+
+dist-hook:
+ echo '$(VERSION)' > $(distdir)/.tarball-version
+
+include %D%/src/Makefile.am
+include %D%/json-elements/Makefile.am
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..e137e52
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,9 @@
+#+title: What’s new in neo-as?
+
+* Noteworthy changes in release ?.? (????-??-??) [?]
+
+#+language: en
+
+# Local Variables:
+# mode: org
+# End:
diff --git a/README b/README
new file mode 100644
index 0000000..f5156dc
--- /dev/null
+++ b/README
@@ -0,0 +1,142 @@
+#+title: Namespaced Extensions Only ActivityStreams
+#+author: Vivien Kraus
+#+email: vivien@planete-kraus.eu
+#+language: en
+
+* Namespaced Extensions Only ActivityStreams
+ActivityStreams is cool, but json-ld is horribly complex. We believe
+that it’s OK to define the NEO-ActivityStreams format by only
+accepting JSON-LD that:
+
+- have exactly one top-level object;
+- have exactly one context object;
+- the context object is directly in the top-level object, at the top
+ of the file;
+- the context is an array, a single string, or a single map;
+- if an array, the first elements of the array is a list of guaranteed
+ non-overlapping context URIs, that do not change the meaning of the
+ definitions of other contexts, and that dereference to constant
+ definitions;
+- the last element of the array is optionally a map from prefixes to
+ namespaces (strings ending in '#' or '/');
+- there are no two overlapping namespaced extensions.
+
+It is OK to expect context URIs to be immutable in all cases, because
+parsing a json-ld document should yield the exact same semantics
+whenever it is parsed: whether the server hosting the context is
+offline, or whether its domain name has been reclaimed by a malicious
+entity, for instance. If the semantics of the context change in a
+backward-incompatible way, the easy solution for the context author is
+to version the context document and host it in a version-dependent
+URI.
+
+We believe that it is OK to have all these constraints because it
+means server functionality can be achieved just by looking at the
+pre-parsed context URIs, and client-specific extensions can be put
+under a namespace.
+
+Client-specific extensions should NOT add any term definition in the
+context. This is not idiomatic json, and is not very readable (URI
+values for properties would be represented as a map with two keys,
+'@value' and '@type' mapped to "@id"), however we believe no one
+actually reads json-ld data, and this makes server-side processing way
+easier.
+
+** Server-side processing of NEO-AS
+Servers process the activities according to a set of semantic rules
+for the data, expressed by context URIs. If an activity uses an
+unknown context URI, the server assumes that it does not interfere
+with the contexts it recognizes and processes the data without needing
+to understand those it does not recognize. Therefore, the server never
+has to fetch any context from the web. When transmitting a NEO-AS
+object, the server should re-write the non-namespaced properties and
+discard context URIs that it does not understand. It should also make
+sure that no two namespaces use the same prefix, and transmit all the
+namespaced data. This way, downstream users of the object can parse it
+as full JSON-LD.
+
+** Client-side processing of NEO-AS
+Client-side extension data should be generated under a prefix that is
+not already bound in the existing data. Parsing the data should be a
+2-step procedure: first, scan the context to find which prefix is
+bound to the extension namespace, and then use this prefix to
+dereference the object properties. Since the context object is at the
+top, parsing extension data is done in one step.
+
+** Examples
+
+Here are two examples of how to convert json-ld data from the json-ld
+playground to neo-as. Many of the examples use the schema.org context,
+without a namespace.
+
+*** Person
+
+Here is how the Person example can put the schema.org vocabulary under
+a proper namespace, in case it is expected to not be immutable.
+
+The source JSON-LD:
+
+#+begin_src js :eval no
+ {
+ "@context": "http://schema.org/",
+ "@type": "Person",
+ "name": "Jane Doe",
+ "jobTitle": "Professor",
+ "telephone": "(425) 123-4567",
+ "url": "http://www.janedoe.com"
+ }
+#+end_src
+
+is now converted to:
+
+#+begin_src js :eval no
+ {
+ "@context": { "schema": "http://schema.org/" },
+ "@type": "Person",
+ "schema:name": "Jane Doe",
+ "schema:jobTitle": "Professor",
+ "schema:telephone": "(425) 123-4567",
+ "schema:url": { "@value": "http://www.janedoe.com", "@type": "@id" }
+ }
+#+end_src
+
+Note that the =schema:url= value starts with =http://=, so it would be
+OK to not decorate it with =@value= and =@type: @id=.
+
+*** Event
+
+This example adds a term definition in the context, which we do not
+want.
+
+#+begin_src js :eval no
+ {
+ "@context": {
+ "ical": "http://www.w3.org/2002/12/cal/ical#",
+ "xsd": "http://www.w3.org/2001/XMLSchema#",
+ "ical:dtstart": {
+ "@type": "xsd:dateTime"
+ }
+ },
+ "ical:summary": "Lady Gaga Concert",
+ "ical:location": "New Orleans Arena, New Orleans, Louisiana, USA",
+ "ical:dtstart": "2011-04-09T20:00:00Z"
+ }
+#+end_src
+
+It is converted to:
+
+#+begin_src js :eval no
+ {
+ "@context": {
+ "ical": "http://www.w3.org/2002/12/cal/ical#",
+ "xsd": "http://www.w3.org/2001/XMLSchema#"
+ },
+ "ical:summary": "Lady Gaga Concert",
+ "ical:location": "New Orleans Arena, New Orleans, Louisiana, USA",
+ "ical:dtstart": { "@value": "2011-04-09T20:00:00Z", "@type": "xsd:dateTime" }
+ }
+#+end_src
+
+# Local Variables:
+# mode: org
+# End:
diff --git a/README-release b/README-release
new file mode 100644
index 0000000..6a5e5a2
--- /dev/null
+++ b/README-release
@@ -0,0 +1,101 @@
+Here are most of the steps we (maintainers) follow when making a release.
+
+* Start from a clean, up-to-date git directory on "master":
+
+ make -k maintainer-clean || { ./configure && make maintainer-clean; }
+ git checkout master
+ git pull origin master
+
+* Ensure that the latest stable versions of autoconf, automake, etc.
+ are in your PATH. See the buildreq list in bootstrap.conf for
+ the complete list of tools.
+
+* Ensure that you have no uncommitted diffs. This should produce no
+ output:
+
+ git diff
+
+* Ensure that you've pushed all changes that belong in the release:
+
+ git push origin master
+
+* Check that the NixOS/Hydra autobuilder is reporting all is well:
+
+ https://hydra.nixos.org/project/gnu/@PACKAGE@-master
+
+* Run the following command to download any new translations:
+
+ ./bootstrap && ./configure
+
+* Pre-release testing: ensure that the following commands succeed:
+
+ c=check ve=check-very-expensive; git grep \-q "^$ve:\$" && c=$ve
+ make $c syntax-check distcheck
+
+* To (i) set the date, version number, and release TYPE on line 3 of
+ NEWS, (ii) commit that, and (iii) tag the release, run
+
+ # "TYPE" must be stable, beta or alpha
+ make release-commit RELEASE='X.Y TYPE'
+
+* Run the following to create release tarballs. Your choice selects the
+ corresponding upload-to destination in the emitted gnupload command.
+ The different destinations are specified in cfg.mk. See the definitions
+ of gnu_ftp_host-{alpha,beta,stable}.
+
+ make release RELEASE='X.Y TYPE'
+
+* Test the tarball. Copy it to a few odd-ball systems and ensure that
+ it builds and passes all tests.
+
+* While that's happening, write the release announcement that you will
+ soon post. Start with the template, $HOME/announce-@PACKAGE@-X.Y
+ that was just created by that "make" command.
+
+Once all the builds and tests have passed,
+
+* Run the gnupload command that was suggested by your "make release"
+ run above, or run
+
+ make upload RELEASE='X.Y TYPE'
+
+* Wait a few minutes (maybe up to 30?) and then use the release URLs to
+ download all tarball/signature pairs and use gpg --verify to ensure
+ that they're all valid.
+
+* Push the NEWS-updating changes and the new tag:
+
+ v=$(cat .prev-version)
+ git push origin master tag v$v
+
+* Announce it on Savannah first, so you can include the savannah.org
+ announcement link in the email message.
+
+ Go to the news-submission form:
+
+ https://savannah.gnu.org/news/submit.php?group=@PACKAGE@
+
+ If it does not work, then enable "News" for the project via this link:
+
+ https://savannah.gnu.org/project/admin/editgroupfeatures.php?group=@PACKAGE@
+
+ Write something like the following:
+
+ Subject: @PACKAGE@-X.Y released [stable]
+ +verbatim+
+ ...paste the announcement here...
+ -verbatim-
+
+ Then go here to approve it:
+
+ https://savannah.gnu.org/news/approve.php?group=@PACKAGE@
+
+* Send the announcement email message.
+
+* After each non-alpha release, run
+
+ make web-manual-update
+
+ to update the on-line manual accessible at
+
+ https://www.gnu.org/software/@PACKAGE@/manual/
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..3562385
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+# Convenience script for regenerating all autogeneratable files that are
+# omitted from the version control repository. In particular, this script
+# also regenerates all aclocal.m4, config.h.in, Makefile.in, configure files
+# with new versions of autoconf or automake.
+
+# Copyright (C) 2003-2023 Free Software Foundation, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Originally written by Paul Eggert. The canonical version of this
+# script is maintained as top/autogen.sh in gnulib. However, to be
+# useful to your package, you should place a copy of it under version
+# control in the top-level directory of your package. The intent is
+# that all customization can be done with a bootstrap.conf file also
+# maintained in your version control; gnulib comes with a template
+# build-aux/bootstrap.conf to get you started.
+#
+# Alternatively, you can use an autogen.sh script that is specific
+# to your package.
+
+me="$0"
+medir=`dirname "$me"`
+
+# Read the function library and the configuration.
+. "$medir"/bootstrap-funclib.sh
+
+autogen "$@"
diff --git a/autopull.sh b/autopull.sh
new file mode 100755
index 0000000..0135a27
--- /dev/null
+++ b/autopull.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Convenience script for fetching auxiliary files that are omitted from
+# the version control repository of this package.
+
+# Copyright (C) 2003-2023 Free Software Foundation, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Originally written by Paul Eggert. The canonical version of this
+# script is maintained as top/autopull.sh in gnulib. However, to be
+# useful to your package, you should place a copy of it under version
+# control in the top-level directory of your package. The intent is
+# that all customization can be done with a bootstrap.conf file also
+# maintained in your version control; gnulib comes with a template
+# build-aux/bootstrap.conf to get you started.
+#
+# Alternatively, you can use an autopull.sh script that is specific
+# to your package.
+
+me="$0"
+medir=`dirname "$me"`
+
+# Read the function library and the configuration.
+. "$medir"/bootstrap-funclib.sh
+
+autopull "$@"
diff --git a/bootstrap b/bootstrap
new file mode 100755
index 0000000..6c170d3
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,234 @@
+#! /bin/sh
+# Bootstrap this package from checked-out sources.
+
+scriptversion=2023-08-29.21; # UTC
+
+# Copyright (C) 2003-2023 Free Software Foundation, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Originally written by Paul Eggert. The canonical version of this
+# script is maintained as top/bootstrap in gnulib. However, to be
+# useful to your package, you should place a copy of it under version
+# control in the top-level directory of your package. The intent is
+# that all customization can be done with a bootstrap.conf file also
+# maintained in your version control; gnulib comes with a template
+# build-aux/bootstrap.conf to get you started.
+
+# Please report bugs or propose patches to bug-gnulib@gnu.org.
+
+me="$0"
+medir=`dirname "$me"`
+
+# Read the function library and the configuration.
+. "$medir"/bootstrap-funclib.sh
+
+usage() {
+ cat <<EOF
+Usage: $me [OPTION]...
+Bootstrap this package from the checked-out sources.
+
+Optional environment variables:
+ GNULIB_SRCDIR Specifies the local directory where gnulib
+ sources reside. Use this if you already
+ have gnulib sources on your machine, and
+ do not want to waste your bandwidth downloading
+ them again.
+ GNULIB_URL URL of the gnulib repository. The default is
+ $default_gnulib_url,
+ which is Gnulib's upstream repository.
+
+Options:
+
+ --pull Do phase 1: pull files from network
+ --gen Do phase 2: generate from local files.
+ (The default is to do both phases.)
+
+ --gnulib-srcdir=DIRNAME specify the local directory where gnulib
+ sources reside. Use this if you already
+ have gnulib sources on your machine, and
+ you want to use these sources. Defaults
+ to \$GNULIB_SRCDIR
+ --gnulib-refdir=DIRNAME specify the local directory where a gnulib
+ repository (with a .git subdirectory) resides.
+ Use this if you already have gnulib sources
+ and history on your machine, and do not want
+ to waste your bandwidth downloading them again.
+ Defaults to \$GNULIB_REFDIR
+
+ --bootstrap-sync if this bootstrap script is not identical to
+ the version in the local gnulib sources,
+ update this script, and then restart it with
+ /bin/sh or the shell \$CONFIG_SHELL
+ --no-bootstrap-sync do not check whether bootstrap is out of sync
+
+ --copy copy files instead of creating symbolic links
+ --force attempt to bootstrap even if the sources seem
+ not to have been checked out
+ --no-git do not use git to update gnulib. Requires that
+ \$GNULIB_SRCDIR or the --gnulib-srcdir option
+ points to a gnulib repository with the correct
+ revision
+ --skip-po do not download po files
+EOF
+ bootstrap_print_option_usage_hook
+ cat <<EOF
+If the file bootstrap.conf exists in the same directory as this script, its
+contents are read as shell variables to configure the bootstrap.
+
+For build prerequisites, environment variables like \$AUTOCONF and \$AMTAR
+are honored.
+
+Gnulib sources can be fetched in various ways:
+
+ * If the environment variable GNULIB_SRCDIR is set (either as an
+ environment variable or via the --gnulib-srcdir option), then sources
+ are fetched from that local directory. If it is a git repository and
+ the configuration variable GNULIB_REVISION is set in bootstrap.conf,
+ then that revision is checked out.
+
+ * Otherwise, if this package is in a git repository with a 'gnulib'
+ submodule configured, then that submodule is initialized and updated
+ and sources are fetched from there. If GNULIB_REFDIR is set (either
+ as an environment variable or via the --gnulib-refdir option) and is
+ a git repository, then it is used as a reference.
+
+ * Otherwise, if the 'gnulib' directory does not exist, Gnulib sources
+ are cloned into that directory using git from \$GNULIB_URL, defaulting
+ to $default_gnulib_url.
+ If the configuration variable GNULIB_REVISION is set in bootstrap.conf,
+ then that revision is checked out.
+
+ * Otherwise, the existing Gnulib sources in the 'gnulib' directory are
+ used. If it is a git repository and the configuration variable
+ GNULIB_REVISION is set in bootstrap.conf, then that revision is
+ checked out.
+
+If you maintain a package and want to pin a particular revision of the
+Gnulib sources that has been tested with your package, then there are
+two possible approaches: either configure a 'gnulib' submodule with the
+appropriate revision, or set GNULIB_REVISION (and if necessary
+GNULIB_URL) in bootstrap.conf.
+
+Running without arguments will suffice in most cases.
+EOF
+}
+
+# Parse options.
+
+# Whether to pull and generate.
+pull=false
+gen=false
+
+# Whether to use copies instead of symlinks.
+copy=false
+
+# Use git to update gnulib sources
+use_git=true
+
+for option
+do
+ case $option in
+ --help)
+ usage
+ exit;;
+ --version)
+ set -e
+ echo "bootstrap $scriptversion lib $scriptlibversion"
+ echo "$copyright"
+ exit 0
+ ;;
+ --pull)
+ pull=true;;
+ --gen)
+ gen=true;;
+ --gnulib-srcdir=*)
+ GNULIB_SRCDIR=${option#--gnulib-srcdir=};;
+ --gnulib-refdir=*)
+ GNULIB_REFDIR=${option#--gnulib-refdir=};;
+ --skip-po)
+ SKIP_PO=t;;
+ --force)
+ checkout_only_file=;;
+ --copy)
+ copy=true;;
+ --bootstrap-sync)
+ bootstrap_sync=true;;
+ --no-bootstrap-sync)
+ bootstrap_sync=false;;
+ --no-git)
+ use_git=false;;
+ *)
+ bootstrap_option_hook $option || die "$option: unknown option";;
+ esac
+done
+
+# Default is to do both.
+$pull || $gen || pull=true gen=true
+
+$use_git || test -n "$GNULIB_SRCDIR" \
+ || die "Error: --no-git requires \$GNULIB_SRCDIR environment variable" \
+ "or --gnulib-srcdir option"
+test -z "$GNULIB_SRCDIR" || test -d "$GNULIB_SRCDIR" \
+ || die "Error: \$GNULIB_SRCDIR environment variable or --gnulib-srcdir" \
+ "option is specified, but does not denote a directory"
+
+if test -n "$checkout_only_file" && test ! -r "$checkout_only_file"; then
+ die "Bootstrapping from a non-checked-out distribution is risky."
+fi
+
+check_build_prerequisites $use_git
+
+if $bootstrap_sync; then
+ prepare_GNULIB_SRCDIR
+ upgrade_bootstrap
+ # Since we have now upgraded if needed, no need to try it a second time below.
+ bootstrap_sync=false
+fi
+
+echo "$0: Bootstrapping from checked-out $package sources..."
+
+# Pass GNULIB_SRCDIR and GNULIB_REFDIR to any subsidiary commands that care.
+export GNULIB_SRCDIR
+export GNULIB_REFDIR
+
+if $pull && { $use_git || test -z "$SKIP_PO"; }; then
+ autopull \
+ `if $bootstrap_sync; then
+ echo ' --bootstrap-sync'
+ else
+ echo ' --no-bootstrap-sync'
+ fi` \
+ `if test -z "$checkout_only_file"; then echo ' --force'; fi` \
+ `if ! $use_git; then echo ' --no-git'; fi` \
+ `if test -n "$SKIP_PO"; then echo ' --skip-po'; fi` \
+ || die "could not fetch auxiliary files"
+fi
+
+if $gen; then
+ autogen \
+ `if $copy; then echo ' --copy'; fi` \
+ `if test -z "$checkout_only_file"; then echo ' --force'; fi` \
+ || die "could not generate auxiliary files"
+fi
+
+# ----------------------------------------------------------------------------
+
+# Local Variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC0"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/bootstrap-funclib.sh b/bootstrap-funclib.sh
new file mode 100644
index 0000000..54deea6
--- /dev/null
+++ b/bootstrap-funclib.sh
@@ -0,0 +1,1302 @@
+# A library of shell functions for autopull.sh, autogen.sh, and bootstrap.
+
+scriptlibversion=2023-08-29.21; # UTC
+
+# Copyright (C) 2003-2023 Free Software Foundation, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Originally written by Paul Eggert. The canonical version of this
+# script is maintained as top/bootstrap-funclib.sh in gnulib. However,
+# to be useful to your package, you should place a copy of it under
+# version control in the top-level directory of your package. The
+# intent is that all customization can be done with a bootstrap.conf
+# file also maintained in your version control; gnulib comes with a
+# template build-aux/bootstrap.conf to get you started.
+
+nl='
+'
+
+# Ensure file names are sorted consistently across platforms.
+LC_ALL=C
+export LC_ALL
+
+# Honor $PERL, but work even if there is none.
+PERL="${PERL-perl}"
+
+default_gnulib_url=https://git.savannah.gnu.org/git/gnulib.git
+
+# Copyright year, for the --version output.
+copyright_year=`echo "$scriptlibversion" | sed -e 's/[^0-9].*//'`
+copyright="Copyright (C) ${copyright_year} Free Software Foundation, Inc.
+License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law."
+
+# warnf_ FORMAT-STRING ARG1...
+# printf-style diagnostic on stderr, each line prefixed with "$me: ".
+warnf_ ()
+{
+ warnf_format_=$1
+ shift
+ nl='
+'
+ case $* in
+ # If any argument contains a newline, prefix every output line;
+ # me_ has newlines and '|' mapped to '?' so it is safe to splice
+ # into the '|'-delimited sed substitution below.
+ *$nl*) me_=$(printf "$me"|tr "$nl|" '??')
+ printf "$warnf_format_" "$@" | sed "s|^|$me_: |" ;;
+ *) printf "$me: $warnf_format_" "$@" ;;
+ esac >&2
+}
+
+# warn_ WORD1...
+# Emit the words, space-separated, as a single warning line.
+warn_ ()
+{
+ # If IFS does not start with ' ', set it and emit the warning in a subshell.
+ case $IFS in
+ ' '*) warnf_ '%s\n' "$*";;
+ *) (IFS=' '; warn_ "$@");;
+ esac
+}
+
+# die WORD1...
+# Print a warning and terminate the script with failure status.
+die() { warn_ "$@"; exit 1; }
+
+# ------------------------------ Configuration. ------------------------------
+# Every variable and hook below is a default; bootstrap.conf (sourced at
+# the end of this section) may override any of them.
+
+# Directory that contains package-specific gnulib modules and/or overrides.
+local_gl_dir=gl
+
+# Name of the Makefile.am
+# XXX Not used.
+gnulib_mk=gnulib.mk
+
+# List of gnulib modules needed.
+gnulib_modules=
+
+# Any gnulib files needed that are not in modules.
+gnulib_files=
+
+# A function to be called for each unrecognized option.  Returns 0 if
+# the option in $1 has been processed by the function.  Returns 1 if
+# the option has not been processed by the function.  Override it via
+# your own definition in bootstrap.conf
+bootstrap_option_hook() { return 1; }
+
+# A function to be called in order to print the --help information
+# corresponding to user-defined command-line options.
+bootstrap_print_option_usage_hook() { :; }
+
+# A function to be called at the end of autopull.sh.
+# Override it via your own definition in bootstrap.conf.
+bootstrap_post_pull_hook() { :; }
+
+# A function to be called right after gnulib-tool is run.
+# Override it via your own definition in bootstrap.conf.
+bootstrap_post_import_hook() { :; }
+
+# A function to be called after everything else in this script.
+# Override it via your own definition in bootstrap.conf.
+bootstrap_epilogue() { :; }
+
+# The command to download all .po files for a specified domain into a
+# specified directory.  Fill in the first %s with the destination
+# directory and the second with the domain name.
+po_download_command_format=\
+"wget --mirror --level=1 -nd -nv -A.po -P '%s' \
+ https://translationproject.org/latest/%s/"
+
+# Prefer a non-empty tarname (4th argument of AC_INIT if given), else
+# fall back to the package name (1st argument with munging).
+extract_package_name='
+ /^AC_INIT(\[*/{
+ s///
+ /^[^,]*,[^,]*,[^,]*,[ []*\([^][ ,)]\)/{
+ s//\1/
+ s/[],)].*//
+ p
+ q
+ }
+ s/[],)].*//
+ s/^GNU //
+ y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/
+ s/[^abcdefghijklmnopqrstuvwxyz0123456789_]/-/g
+ p
+ }
+'
+# Ask autoconf for AC_INIT's 4th argument (the tarname); if that yields
+# nothing, run the sed fallback above over configure.ac.
+package=$(${AUTOCONF:-autoconf} --trace AC_INIT:\$4 configure.ac 2>/dev/null)
+if test -z "$package"; then
+ package=$(sed -n "$extract_package_name" configure.ac) \
+ || die 'cannot find package name in configure.ac'
+fi
+gnulib_name=lib$package
+
+# Standard GNU source-tree layout; all overridable in bootstrap.conf.
+build_aux=build-aux
+source_base=lib
+m4_base=m4
+doc_base=doc
+tests_base=tests
+gnulib_extra_files="
+ build-aux/install-sh
+ build-aux/mdate-sh
+ build-aux/texinfo.tex
+ build-aux/depcomp
+ build-aux/config.guess
+ build-aux/config.sub
+ doc/INSTALL
+"
+
+# Additional gnulib-tool options to use.  Use "\newline" to break lines.
+gnulib_tool_option_extras=
+
+# Other locale categories that need message catalogs.
+EXTRA_LOCALE_CATEGORIES=
+
+# Additional xgettext options to use.  Use "\\\newline" to break lines.
+XGETTEXT_OPTIONS='\\\
+ --flag=_:1:pass-c-format\\\
+ --flag=N_:1:pass-c-format\\\
+ --flag=error:3:c-format --flag=error_at_line:5:c-format\\\
+'
+
+# Package bug report address and copyright holder for gettext files
+COPYRIGHT_HOLDER='Free Software Foundation, Inc.'
+MSGID_BUGS_ADDRESS=bug-$package@gnu.org
+
+# Files we don't want to import.
+# XXX Not used.
+excluded_files=
+
+# File that should exist in the top directory of a checked out hierarchy,
+# but not in a distribution tarball.
+checkout_only_file=README-hacking
+
+# Set this to '.cvsignore .gitignore' in bootstrap.conf if you want
+# those files to be generated in directories like lib/, m4/, and po/.
+# Or set it to 'auto' to make this script select which to use based
+# on which version control system (if any) is used in the source directory.
+vc_ignore=auto
+
+# Set this to true in bootstrap.conf to enable --bootstrap-sync by
+# default.
+bootstrap_sync=false
+
+# Override the default configuration, if necessary.
+# Make sure that bootstrap.conf is sourced from the current directory
+# if we were invoked as "sh bootstrap".
+conffile=`dirname "$me"`/bootstrap.conf
+test -r "$conffile" && . "$conffile"
+
+# ------------------------- Build-time prerequisites -------------------------
+
+# check_exists [--verbose] PROGRAM
+# Succeed iff PROGRAM can actually be invoked.  Shell exit statuses
+# 126 (found but not runnable) and 127 (not found) indicate failure.
+check_exists() {
+ if test "$1" = "--verbose"; then
+ ($2 --version </dev/null) >/dev/null 2>&1
+ if test $? -ge 126; then
+ # If not found, run with diagnostics as one may be
+ # presented with env variables to set to find the right version
+ ($2 --version </dev/null)
+ fi
+ else
+ ($1 --version </dev/null) >/dev/null 2>&1
+ fi
+
+ # Any status below 126 (even a nonzero one) proves the program ran.
+ test $? -lt 126
+}
+
+# sort_ver VER1 VER2 - echo the two versions in ascending order.
+# Note this deviates from the version comparison in automake
+# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a
+# but this should suffice as we won't be specifying old
+# version formats or redundant trailing .0 in bootstrap.conf.
+# If we did want full compatibility then we should probably
+# use m4_version_compare from autoconf.
+sort_ver() { # sort -V is not generally available
+ ver1="$1"
+ ver2="$2"
+
+ # split on '.' and compare each component
+ i=1
+ while : ; do
+ p1=$(echo "$ver1" | cut -d. -f$i)
+ p2=$(echo "$ver2" | cut -d. -f$i)
+ # An exhausted version is a prefix of the other, hence the smaller.
+ if [ ! "$p1" ]; then
+ echo "$1 $2"
+ break
+ elif [ ! "$p2" ]; then
+ echo "$2 $1"
+ break
+ elif [ ! "$p1" = "$p2" ]; then
+ if [ "$p1" -gt "$p2" ] 2>/dev/null; then # numeric comparison
+ echo "$2 $1"
+ elif [ "$p2" -gt "$p1" ] 2>/dev/null; then # numeric comparison
+ echo "$1 $2"
+ else # numeric, then lexicographic comparison
+ lp=$(printf "%s\n%s\n" "$p1" "$p2" | LANG=C sort -n | tail -n1)
+ if [ "$lp" = "$p2" ]; then
+ echo "$1 $2"
+ else
+ echo "$2 $1"
+ fi
+ fi
+ break
+ fi
+ i=$(($i+1))
+ done
+}
+
+get_version_sed='
+# Move version to start of line.
+s/.*[v ]\([0-9]\)/\1/
+
+# Skip lines that do not start with version.
+/^[0-9]/!d
+
+# Remove characters after the version.
+s/[^.a-z0-9-].*//
+
+# The first component must be digits only.
+s/^\([0-9]*\)[a-z-].*/\1/
+
+#the following essentially does s/5.005/5.5/
+s/\.0*\([1-9]\)/.\1/g
+p
+q'
+
+# get_version APP
+# Print APP's normalized version number on stdout.  If APP cannot
+# report a version, re-run it visibly for diagnostics and fail.
+get_version() {
+ app=$1
+
+ $app --version >/dev/null 2>&1 || { $app --version; return 1; }
+
+ $app --version 2>&1 | sed -n "$get_version_sed"
+}
+
+# check_versions
+# Read "program min-version" pairs from stdin, one per line, and verify
+# each program is installed and recent enough.  A min-version of "-"
+# requires mere existence.  Returns nonzero if any check failed.
+check_versions() {
+ ret=0
+
+ while read app req_ver; do
+ # We only need libtoolize from the libtool package.
+ if test "$app" = libtool; then
+ app=libtoolize
+ fi
+ # Exempt git if git is not needed.
+ if test "$app" = git; then
+ $check_git || continue
+ fi
+ # Honor $APP variables ($TAR, $AUTOCONF, etc.)
+ appvar=$(echo $app | LC_ALL=C tr '[a-z]-' '[A-Z]_')
+ test "$appvar" = TAR && appvar=AMTAR
+ case $appvar in
+ GZIP) ;; # Do not use $GZIP: it contains gzip options.
+ PERL::*) ;; # Keep perl modules as-is
+ *) eval "app=\${$appvar-$app}" ;;
+ esac
+
+ # Handle the still-experimental Automake-NG programs specially.
+ # They remain named as the mainstream Automake programs ("automake",
+ # and "aclocal") to avoid gratuitous incompatibilities with
+ # preexisting usages (by, say, autoreconf, or custom autogen.sh
+ # scripts), but correctly identify themselves (as being part of
+ # "GNU automake-ng") when asked their version.
+ case $app in
+ automake-ng|aclocal-ng)
+ app=${app%-ng}
+ ($app --version | grep '(GNU automake-ng)') >/dev/null 2>&1 || {
+ warn_ "Error: '$app' not found or not from Automake-NG"
+ ret=1
+ continue
+ } ;;
+ # Another check is for perl modules.  These can be written as
+ # e.g. perl::XML::XPath in case of XML::XPath module, etc.
+ perl::*)
+ # Extract module name
+ app="${app#perl::}"
+ if ! $PERL -m"$app" -e 'exit 0' >/dev/null 2>&1; then
+ warn_ "Error: perl module '$app' not found"
+ ret=1
+ fi
+ continue
+ ;;
+ esac
+ if [ "$req_ver" = "-" ]; then
+ # Merely require app to exist; not all prereq apps are well-behaved
+ # so we have to rely on $? rather than get_version.
+ if ! check_exists --verbose $app; then
+ warn_ "Error: '$app' not found"
+ ret=1
+ fi
+ else
+ # Require app to produce a new enough version string.
+ inst_ver=$(get_version $app)
+ if [ ! "$inst_ver" ]; then
+ warn_ "Error: '$app' not found"
+ ret=1
+ else
+ # The newer of required and installed must be the installed one.
+ latest_ver=$(sort_ver $req_ver $inst_ver | cut -d' ' -f2)
+ if [ ! "$latest_ver" = "$inst_ver" ]; then
+ warnf_ '%s\n' \
+ "Error: '$app' version == $inst_ver is too old" \
+ " '$app' version >= $req_ver is required"
+ ret=1
+ fi
+ fi
+ fi
+ done
+
+ return $ret
+}
+
+# print_versions
+# Dump the $buildreq prerequisite table in a human-readable form.
+print_versions() {
+ echo "Program    Min_version"
+ echo "----------------------"
+ printf %s "$buildreq"
+ echo "----------------------"
+ # can't depend on column -t
+}
+
+# check_build_prerequisites CHECK_GIT
+# Augment $buildreq with implied prerequisites (automake, autoconf, and
+# patch when local .diff overrides exist), then verify them all.
+# CHECK_GIT (true/false) controls whether git itself is required.
+check_build_prerequisites()
+{
+ check_git="$1"
+
+ # gnulib-tool requires at least automake and autoconf.
+ # If either is not listed, add it (with minimum version) as a prerequisite.
+ case $buildreq in
+ *automake*) ;;
+ *) buildreq="automake 1.9
+$buildreq" ;;
+ esac
+ case $buildreq in
+ *autoconf*) ;;
+ *) buildreq="autoconf 2.59
+$buildreq" ;;
+ esac
+
+ # When we can deduce that gnulib-tool will require patch,
+ # and when patch is not already listed as a prerequisite, add it, too.
+ if test -d "$local_gl_dir" \
+ && ! find "$local_gl_dir" -name '*.diff' -exec false {} +; then
+ case $buildreq in
+ *patch*) ;;
+ *) buildreq="patch -
+$buildreq" ;;
+ esac
+ fi
+
+ if ! printf '%s' "$buildreq" | check_versions; then
+ echo >&2
+ if test -f README-prereq; then
+ die "See README-prereq for how to get the prerequisite programs"
+ else
+ die "Please install the prerequisite programs"
+ fi
+ fi
+
+ # Warn the user if autom4te appears to be broken; this causes known
+ # issues with at least gettext 0.18.3.
+ probe=$(echo 'm4_quote([hi])' | autom4te -l M4sugar -t 'm4_quote:$%' -)
+ if test "x$probe" != xhi; then
+ warn_ "WARNING: your autom4te wrapper eats stdin;"
+ warn_ "if bootstrap fails, consider upgrading your autotools"
+ fi
+}
+
+# find_tool ENVVAR NAMES...
+# -------------------------
+# Search for a required program.  Use the value of ENVVAR, if set,
+# otherwise find the first of the NAMES that can be run.
+# If found, set and export ENVVAR to the program name, die otherwise.
+# Example: find_tool SHA1SUM sha1sum gsha1sum shasum sha1
+#
+# FIXME: code duplication, see also gnu-web-doc-update.
+find_tool ()
+{
+ find_tool_envvar=$1
+ shift
+ find_tool_names=$@
+ eval "find_tool_res=\$$find_tool_envvar"
+ if test x"$find_tool_res" = x; then
+ # "for i" with no list iterates over the remaining positional NAMES.
+ for i; do
+ if check_exists $i; then
+ find_tool_res=$i
+ break
+ fi
+ done
+ fi
+ if test x"$find_tool_res" = x; then
+ warn_ "one of these is required: $find_tool_names;"
+ die "alternatively set $find_tool_envvar to a compatible tool"
+ fi
+ eval "$find_tool_envvar=\$find_tool_res"
+ eval "export $find_tool_envvar"
+}
+
+# --------------------- Preparing GNULIB_SRCDIR for use. ---------------------
+# This is part of autopull.sh, but bootstrap needs it too, for self-upgrading.
+
+# cleanup_gnulib
+# Signal/failure handler: remove the partially-populated $gnulib_path
+# and exit, preserving the status of the command that failed.
+cleanup_gnulib() {
+ status=$?
+ # XXX It's a bad idea to erase the submodule directory if it contains local
+ #     modifications.
+ rm -fr "$gnulib_path"
+ exit $status
+}
+
+# git_modules_config ARGS...
+# Query or set .gitmodules via git-config; silently fails if the file
+# does not exist.
+git_modules_config () {
+ test -f .gitmodules && git config --file .gitmodules "$@"
+}
+
+# prepare_GNULIB_SRCDIR
+# Ensure $GNULIB_SRCDIR names a usable gnulib checkout: either validate
+# a user-supplied directory, or populate the 'gnulib' submodule / clone
+# (optionally using $GNULIB_REFDIR as a local reference, and honoring
+# $GNULIB_URL and $GNULIB_REVISION).
+prepare_GNULIB_SRCDIR ()
+{
+ if test -n "$GNULIB_SRCDIR"; then
+ # Use GNULIB_SRCDIR directly.
+ # We already checked that $GNULIB_SRCDIR references a directory.
+ # Verify that it contains a gnulib checkout.
+ test -f "$GNULIB_SRCDIR/gnulib-tool" \
+ || die "Error: --gnulib-srcdir or \$GNULIB_SRCDIR is specified," \
+ "but does not contain gnulib-tool"
+ elif $use_git; then
+ gnulib_path=$(git_modules_config submodule.gnulib.path)
+ test -z "$gnulib_path" && gnulib_path=gnulib
+
+ # Get gnulib files.  Populate $gnulib_path, possibly updating a
+ # submodule, for use in the rest of the script.
+
+ if test -n "$GNULIB_REFDIR" && test -d "$GNULIB_REFDIR"/.git \
+ && git_modules_config submodule.gnulib.url >/dev/null; then
+ # Use GNULIB_REFDIR as a reference.
+ echo "$0: getting gnulib files..."
+ if git submodule -h|grep -- --reference > /dev/null; then
+ # Prefer the one-liner available in git 1.6.4 or newer.
+ git submodule update --init --reference "$GNULIB_REFDIR" \
+ "$gnulib_path" || exit $?
+ else
+ # This fallback allows at least git 1.5.5.
+ if test -f "$gnulib_path"/gnulib-tool; then
+ # Since file already exists, assume submodule init already complete.
+ git submodule update -- "$gnulib_path" || exit $?
+ else
+ # Older git can't clone into an empty directory.
+ rmdir "$gnulib_path" 2>/dev/null
+ git clone --reference "$GNULIB_REFDIR" \
+ "$(git_modules_config submodule.gnulib.url)" "$gnulib_path" \
+ && git submodule init -- "$gnulib_path" \
+ && git submodule update -- "$gnulib_path" \
+ || exit $?
+ fi
+ fi
+ else
+ # GNULIB_REFDIR is not set or not usable. Ignore it.
+ if git_modules_config submodule.gnulib.url >/dev/null; then
+ echo "$0: getting gnulib files..."
+ git submodule init -- "$gnulib_path" || exit $?
+ git submodule update -- "$gnulib_path" || exit $?
+
+ elif [ ! -d "$gnulib_path" ]; then
+ # No submodule and no directory: clone from scratch.  Install a
+ # cleanup trap so an interrupted clone does not leave debris.
+ echo "$0: getting gnulib files..."
+
+ trap cleanup_gnulib HUP INT PIPE TERM
+
+ shallow=
+ if test -z "$GNULIB_REVISION"; then
+ if git clone -h 2>&1 | grep -- --depth > /dev/null; then
+ shallow='--depth 2'
+ fi
+ git clone $shallow ${GNULIB_URL:-$default_gnulib_url} "$gnulib_path" \
+ || cleanup_gnulib
+ else
+ if git fetch -h 2>&1 | grep -- --depth > /dev/null; then
+ shallow='--depth 2'
+ fi
+ mkdir -p "$gnulib_path"
+ # Only want a shallow checkout of $GNULIB_REVISION, but git does not
+ # support cloning by commit hash. So attempt a shallow fetch by commit
+ # hash to minimize the amount of data downloaded and changes needed to
+ # be processed, which can drastically reduce download and processing
+ # time for checkout. If the fetch by commit fails, a shallow fetch can
+ # not be performed because we do not know what the depth of the commit
+ # is without fetching all commits. So fall back to fetching all
+ # commits.
+ git -C "$gnulib_path" init
+ git -C "$gnulib_path" remote add origin \
+ ${GNULIB_URL:-$default_gnulib_url}
+ git -C "$gnulib_path" fetch $shallow origin "$GNULIB_REVISION" \
+ || git -C "$gnulib_path" fetch origin \
+ || cleanup_gnulib
+ git -C "$gnulib_path" reset --hard FETCH_HEAD
+ fi
+
+ trap - HUP INT PIPE TERM
+ fi
+ fi
+ GNULIB_SRCDIR=$gnulib_path
+ # Verify that the submodule contains a gnulib checkout.
+ test -f "$gnulib_path/gnulib-tool" \
+ || die "Error: $gnulib_path is supposed to contain a gnulib checkout," \
+ "but does not contain gnulib-tool"
+ fi
+
+ # XXX Should this be done if $use_git is false?
+ if test -d "$GNULIB_SRCDIR"/.git && test -n "$GNULIB_REVISION" \
+ && ! git_modules_config submodule.gnulib.url >/dev/null; then
+ (cd "$GNULIB_SRCDIR" && git checkout "$GNULIB_REVISION") || cleanup_gnulib
+ fi
+
+ # $GNULIB_SRCDIR now points to the version of gnulib to use, and
+ # we no longer need to use git or $gnulib_path below here.
+}
+
+# -------- Upgrading bootstrap to the version found in GNULIB_SRCDIR. --------
+
+# upgrade_bootstrap
+# If this script (and, in the split layout, its companion scripts)
+# differ from the copies in $GNULIB_SRCDIR, copy the new versions into
+# place and re-exec the caller with its original arguments plus
+# --no-bootstrap-sync to avoid an upgrade loop.
+upgrade_bootstrap ()
+{
+ if test -f "$medir"/bootstrap-funclib.sh; then
+ # Split layout: bootstrap + funclib + autopull.sh + autogen.sh.
+ update_lib=true
+ { cmp -s "$medir"/bootstrap "$GNULIB_SRCDIR/top/bootstrap" \
+ && cmp -s "$medir"/bootstrap-funclib.sh \
+ "$GNULIB_SRCDIR/top/bootstrap-funclib.sh" \
+ && cmp -s "$medir"/autopull.sh "$GNULIB_SRCDIR/top/autopull.sh" \
+ && cmp -s "$medir"/autogen.sh "$GNULIB_SRCDIR/top/autogen.sh"; \
+ }
+ else
+ # Monolithic layout: a single bootstrap script.
+ update_lib=false
+ cmp -s "$medir"/bootstrap "$GNULIB_SRCDIR/build-aux/bootstrap"
+ fi || {
+ if $update_lib; then
+ echo "$0: updating bootstrap & companions and restarting..."
+ else
+ echo "$0: updating bootstrap and restarting..."
+ fi
+ # Probe whether this shell's "sh -c SCRIPT ARG..." assigns the first
+ # argument to $0 or to $1, so the argument list below lines up.
+ case $(sh -c 'echo "$1"' -- a) in
+ a) ignored=--;;
+ *) ignored=ignored;;
+ esac
+ u=$update_lib
+ # $1/$2: new bootstrap candidates, $3: destination; $4-$9: the three
+ # companion (source, destination) pairs, copied or removed per $u.
+ exec sh -c \
+ '{ if '$u' && test -f "$1"; then cp "$1" "$3"; else cp "$2" "$3"; fi; } &&
+ { if '$u' && test -f "$4"; then cp "$4" "$5"; else rm -f "$5"; fi; } &&
+ { if '$u' && test -f "$6"; then cp "$6" "$7"; else rm -f "$7"; fi; } &&
+ { if '$u' && test -f "$8"; then cp "$8" "$9"; else rm -f "$9"; fi; } &&
+ shift && shift && shift && shift && shift &&
+ shift && shift && shift && shift &&
+ exec "${CONFIG_SHELL-/bin/sh}" "$@"' \
+ $ignored \
+ "$GNULIB_SRCDIR/top/bootstrap" "$GNULIB_SRCDIR/build-aux/bootstrap" \
+ "$medir/bootstrap" \
+ "$GNULIB_SRCDIR/top/bootstrap-funclib.sh" "$medir/bootstrap-funclib.sh" \
+ "$GNULIB_SRCDIR/top/autopull.sh" "$medir/autopull.sh" \
+ "$GNULIB_SRCDIR/top/autogen.sh" "$medir/autogen.sh" \
+ "$0" "$@" --no-bootstrap-sync
+ }
+}
+
+# ----------------------------------------------------------------------------
+
+# Decide whether gnulib is needed at all: only when bootstrap.conf
+# requested modules, files, or extra files.
+if test x"$gnulib_modules$gnulib_files$gnulib_extra_files" = x; then
+ use_gnulib=false
+else
+ use_gnulib=true
+fi
+
+# -------- Fetch auxiliary files from the network. --------------------------
+
+# autopull_usage
+# Print autopull's --help text (a pair of here-documents, with the
+# package's own options printed by the hook in between).
+autopull_usage() {
+ cat <<EOF
+Usage: $me [OPTION]...
+Bootstrap this package from the checked-out sources.
+
+Optional environment variables:
+  GNULIB_SRCDIR            Specifies the local directory where gnulib
+                           sources reside.  Use this if you already
+                           have gnulib sources on your machine, and
+                           you want to use these sources.
+  GNULIB_REFDIR            Specifies the local directory where a gnulib
+                           repository (with a .git subdirectory) resides.
+                           Use this if you already have gnulib sources
+                           and history on your machine, and do not want
+                           to waste your bandwidth downloading them again.
+  GNULIB_URL               URL of the gnulib repository.  The default is
+                           $default_gnulib_url,
+                           which is Gnulib's upstream repository.
+
+Options:
+  --bootstrap-sync         if this bootstrap script is not identical to
+                           the version in the local gnulib sources,
+                           update this script, and then restart it with
+                           /bin/sh or the shell \$CONFIG_SHELL
+  --no-bootstrap-sync      do not check whether bootstrap is out of sync
+  --force                  attempt to bootstrap even if the sources seem
+                           not to have been checked out
+  --no-git                 do not use git to update gnulib.  Requires that
+                           \$GNULIB_SRCDIR or the --gnulib-srcdir option
+                           points to a gnulib repository with the correct
+                           revision
+  --skip-po                do not download po files
+EOF
+ bootstrap_print_option_usage_hook
+ cat <<EOF
+If the file bootstrap.conf exists in the same directory as this script, its
+contents are read as shell variables to configure the bootstrap.
+
+For build prerequisites, environment variables like \$AUTOCONF and \$AMTAR
+are honored.
+
+Gnulib sources can be fetched in various ways:
+
+ * If the environment variable GNULIB_SRCDIR is set (either as an
+   environment variable or via the --gnulib-srcdir option), then sources
+   are fetched from that local directory.  If it is a git repository and
+   the configuration variable GNULIB_REVISION is set in bootstrap.conf,
+   then that revision is checked out.
+
+ * Otherwise, if this package is in a git repository with a 'gnulib'
+   submodule configured, then that submodule is initialized and updated
+   and sources are fetched from there.  If GNULIB_REFDIR is set (either
+   as an environment variable or via the --gnulib-refdir option) and is
+   a git repository, then it is used as a reference.
+
+ * Otherwise, if the 'gnulib' directory does not exist, Gnulib sources
+   are cloned into that directory using git from \$GNULIB_URL, defaulting
+   to $default_gnulib_url.
+   If the configuration variable GNULIB_REVISION is set in bootstrap.conf,
+   then that revision is checked out.
+
+ * Otherwise, the existing Gnulib sources in the 'gnulib' directory are
+   used.  If it is a git repository and the configuration variable
+   GNULIB_REVISION is set in bootstrap.conf, then that revision is
+   checked out.
+
+If you maintain a package and want to pin a particular revision of the
+Gnulib sources that has been tested with your package, then there are
+two possible approaches: either configure a 'gnulib' submodule with the
+appropriate revision, or set GNULIB_REVISION (and if necessary
+GNULIB_URL) in bootstrap.conf.
+
+Running without arguments will suffice in most cases.
+EOF
+}
+
+# autopull [OPTION]...
+# Fetch auxiliary files that are omitted from the version control
+# repository of this package: the gnulib sources (via
+# prepare_GNULIB_SRCDIR) and the translation-project .po files.
+autopull()
+{
+ # Ensure that CDPATH is not set.  Otherwise, the output from cd
+ # would cause trouble in at least one use below.
+ (unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+ # Parse options.
+
+ # Use git to update gnulib sources
+ use_git=true
+
+ for option
+ do
+ case $option in
+ --help)
+ autopull_usage
+ return;;
+ --version)
+ set -e
+ echo "autopull.sh $scriptlibversion"
+ echo "$copyright"
+ return 0
+ ;;
+ --skip-po)
+ SKIP_PO=t;;
+ --force)
+ checkout_only_file=;;
+ --bootstrap-sync)
+ bootstrap_sync=true;;
+ --no-bootstrap-sync)
+ bootstrap_sync=false;;
+ --no-git)
+ use_git=false;;
+ *)
+ # Give bootstrap.conf's hook a chance before rejecting.
+ bootstrap_option_hook $option || die "$option: unknown option";;
+ esac
+ done
+
+ $use_git || test -n "$GNULIB_SRCDIR" \
+ || die "Error: --no-git requires \$GNULIB_SRCDIR environment variable" \
+ "or --gnulib-srcdir option"
+ test -z "$GNULIB_SRCDIR" || test -d "$GNULIB_SRCDIR" \
+ || die "Error: \$GNULIB_SRCDIR environment variable" \
+ "or --gnulib-srcdir option is specified," \
+ "but does not denote a directory"
+
+ if test -n "$checkout_only_file" && test ! -r "$checkout_only_file"; then
+ die "Running this script from a non-checked-out distribution is risky."
+ fi
+
+ check_build_prerequisites $use_git
+
+ if $use_gnulib || $bootstrap_sync; then
+ prepare_GNULIB_SRCDIR
+ if $bootstrap_sync; then
+ upgrade_bootstrap
+ fi
+ fi
+
+ # Find sha1sum, named gsha1sum on MacPorts, shasum on Mac OS X 10.6.
+ # Also find the compatible sha1 utility on the BSDs
+ if test x"$SKIP_PO" = x; then
+ find_tool SHA1SUM sha1sum gsha1sum shasum sha1
+ fi
+
+ # See if we can use gnulib's git-merge-changelog merge driver.
+ if $use_git && test -d .git && check_exists git; then
+ if git config merge.merge-changelog.driver >/dev/null ; then
+ :
+ elif check_exists git-merge-changelog; then
+ echo "$0: initializing git-merge-changelog driver"
+ git config merge.merge-changelog.name 'GNU-style ChangeLog merge driver'
+ git config merge.merge-changelog.driver 'git-merge-changelog %O %A %B'
+ else
+ echo "$0: consider installing git-merge-changelog from gnulib"
+ fi
+ fi
+
+ # Unless --skip-po was given, refresh translations for the main and
+ # runtime gettext domains.
+ case $SKIP_PO in
+ '')
+ if test -d po; then
+ update_po_files po $package || return
+ fi
+
+ if test -d runtime-po; then
+ update_po_files runtime-po $package-runtime || return
+ fi;;
+ esac
+
+ # ---------------------------------------------------------------------------
+
+ bootstrap_post_pull_hook \
+ || die "bootstrap_post_pull_hook failed"
+
+ # Don't proceed if there are uninitialized submodules.  In particular,
+ # autogen.sh will remove dangling links, which might be links into
+ # uninitialized submodules.
+ # But it's OK if the 'gnulib' submodule is uninitialized, as long as
+ # GNULIB_SRCDIR is set.
+ if $use_git; then
+ # Uninitialized submodules are listed with an initial dash.
+ uninitialized=`git submodule | grep '^-' | awk '{ print $2 }'`
+ if test -n "$GNULIB_SRCDIR"; then
+ uninitialized=`echo "$uninitialized" | grep -v '^gnulib$'`
+ fi
+ if test -n "$uninitialized"; then
+ uninit_comma=`echo "$uninitialized" | tr '\n' ',' | sed -e 's|,$|.|'`
+ die "Some git submodules are not initialized: "$uninit_comma \
+ "Either use option '--no-git'," \
+ "or run 'git submodule update --init' and bootstrap again."
+ fi
+ fi
+
+ if test -f "$medir"/autogen.sh; then
+ echo "$0: done.  Now you can run '$medir/autogen.sh'."
+ fi
+}
+
+# ----------------------------- Get translations. -----------------------------
+
+# download_po_files SUBDIR DOMAIN
+# Mirror all .po files for gettext DOMAIN into SUBDIR, using the
+# (configurable) $po_download_command_format as a printf template.
+download_po_files() {
+ subdir=$1
+ domain=$2
+ echo "$me: getting translations into $subdir for $domain..."
+ cmd=$(printf "$po_download_command_format" "$subdir" "$domain")
+ eval "$cmd"
+}
+
+# update_po_files PO_DIR DOMAIN
+# Mirror .po files to $po_dir/.reference and copy only the new
+# or modified ones into $po_dir.  Also update $po_dir/LINGUAS.
+# Note po files that exist locally only are left in $po_dir but will
+# not be included in LINGUAS and hence will not be distributed.
+update_po_files() {
+ # Directory containing primary .po files.
+ # Overwrite them only when we're sure a .po file is new.
+ po_dir=$1
+ domain=$2
+
+ # Mirror *.po files into this dir.
+ # Usually contains *.s1 checksum files.
+ ref_po_dir="$po_dir/.reference"
+
+ test -d $ref_po_dir || mkdir $ref_po_dir || return
+ download_po_files $ref_po_dir $domain \
+ && ls "$ref_po_dir"/*.po 2>/dev/null |
+ sed 's|.*/||; s|\.po$||' > "$po_dir/LINGUAS" || return
+
+ # "*" survives globbing only when no .po files exist; map it to the
+ # sentinel "x" so the loop below does nothing.
+ langs=$(cd $ref_po_dir && echo *.po | sed 's/\.po//g')
+ test "$langs" = '*' && langs=x
+ for po in $langs; do
+ case $po in x) continue;; esac
+ new_po="$ref_po_dir/$po.po"
+ cksum_file="$ref_po_dir/$po.s1"
+ # Copy a po file only when it is new or its checksum changed.
+ if ! test -f "$cksum_file" ||
+ ! test -f "$po_dir/$po.po" ||
+ ! $SHA1SUM -c "$cksum_file" < "$new_po" > /dev/null 2>&1; then
+ echo "$me: updated $po_dir/$po.po..."
+ cp "$new_po" "$po_dir/$po.po" \
+ && $SHA1SUM < "$new_po" > "$cksum_file" || return
+ fi
+ done
+}
+
+# -------- Generate files automatically from existing sources. --------------
+
+# autogen_usage
+# Print autogen's --help text (here-documents with the package's own
+# options printed by the hook in between).
+autogen_usage() {
+ cat <<EOF
+Usage: $me [OPTION]...
+Bootstrap this package from the checked-out sources.
+
+Optional environment variables:
+  GNULIB_SRCDIR            Specifies the local directory where gnulib
+                           sources reside.  Use this if you already
+                           have gnulib sources on your machine, and
+                           you want to use these sources.
+
+Options:
+  --copy                   copy files instead of creating symbolic links
+  --force                  attempt to bootstrap even if the sources seem
+                           not to have been checked out
+EOF
+ bootstrap_print_option_usage_hook
+ cat <<EOF
+If the file bootstrap.conf exists in the same directory as this script, its
+contents are read as shell variables to configure the bootstrap.
+
+For build prerequisites, environment variables like \$AUTOCONF and \$AMTAR
+are honored.
+
+Gnulib sources are assumed to be present:
+  * in \$GNULIB_SRCDIR, if that environment variable is set,
+  * otherwise, in the 'gnulib' submodule, if such a submodule is configured,
+  * otherwise, in the 'gnulib' subdirectory.
+
+Running without arguments will suffice in most cases.
+EOF
+}
+
+
+# version_controlled_file PARENT FILE
+# Succeed iff FILE is tracked by the detected VCS (git, svn, or CVS);
+# warn and fail when no VCS is recognized.
+version_controlled_file() {
+ parent=$1
+ file=$2
+ if test -d .git; then
+ # "git rm -n" is a dry run that fails for untracked files.
+ git rm -n "$file" > /dev/null 2>&1
+ elif test -d .svn; then
+ svn log -r HEAD "$file" > /dev/null 2>&1
+ elif test -d CVS; then
+ grep -F "/${file##*/}/" "$parent/CVS/Entries" 2>/dev/null |
+ grep '^/[^/]*/[0-9]' > /dev/null
+ else
+ warn_ "no version control for $file?"
+ false
+ fi
+}
+
+# gitignore_entries FILE...
+# Strip blank and comment lines to leave significant entries.
+gitignore_entries() {
+ sed '/^#/d; /^$/d' "$@"
+}
+
+# insert_if_absent FILE STR
+# If $STR is not already on a line by itself in $FILE, insert it at the start.
+# Entries are inserted at the start of the ignore list to ensure existing
+# entries starting with ! are not overridden.  Such entries support
+# whitelisting exceptions after a more generic blacklist pattern.
+insert_if_absent() {
+ file=$1
+ str=$2
+ test -f $file || touch $file
+ test -r $file || die "Error: failed to read ignore file: $file"
+ # Duplicates would defeat the line-count comparison below.
+ duplicate_entries=$(gitignore_entries $file | sort | uniq -d)
+ if [ "$duplicate_entries" ] ; then
+ die "Error: Duplicate entries in $file: " $duplicate_entries
+ fi
+ # If adding $str grows the de-duplicated entry count, it was absent.
+ linesold=$(gitignore_entries $file | wc -l)
+ linesnew=$( { echo "$str"; cat $file; } | gitignore_entries | sort -u | wc -l)
+ if [ $linesold != $linesnew ] ; then
+ { echo "$str" | cat - $file > $file.bak && mv $file.bak $file; } \
+ || die "insert_if_absent $file $str: failed"
+ fi
+}
+
+# insert_vc_ignore VC_IGNORE_FILE PATTERN
+# Adjust $PATTERN for $VC_IGNORE_FILE and insert it with
+# insert_if_absent.
+insert_vc_ignore() {
+ vc_ignore_file="$1"
+ pattern="$2"
+ case $vc_ignore_file in
+ *.gitignore)
+ # A .gitignore entry that does not start with '/' applies
+ # recursively to subdirectories, so prepend '/' to every
+ # .gitignore entry.
+ pattern=$(echo "$pattern" | sed s,^,/,);;
+ esac
+ insert_if_absent "$vc_ignore_file" "$pattern"
+}
+
+# symlink_to_dir SRCDIR FILE [DST]
+# Make DST (default: FILE) a copy of, or symlink to, SRCDIR/FILE,
+# creating (and vc-ignoring) intermediate directories as needed.
+# Honors the global $copy flag and $vc_ignore list.
+symlink_to_dir()
+{
+ src=$1/$2
+ dst=${3-$2}
+
+ test -f "$src" && {
+
+ # If the destination directory doesn't exist, create it.
+ # This is required at least for "lib/uniwidth/cjk.h".
+ dst_dir=$(dirname "$dst")
+ if ! test -d "$dst_dir"; then
+ mkdir -p "$dst_dir"
+
+ # If we've just created a directory like lib/uniwidth,
+ # tell version control system(s) it's ignorable.
+ # FIXME: for now, this does only one level
+ parent=$(dirname "$dst_dir")
+ for dot_ig in x $vc_ignore; do
+ test $dot_ig = x && continue
+ ig=$parent/$dot_ig
+ insert_vc_ignore $ig "${dst_dir##*/}/"
+ done
+ fi
+
+ if $copy; then
+ {
+ # Remove any stale symlink before copying over it.
+ test ! -h "$dst" || {
+ echo "$me: rm -f $dst" &&
+ rm -f "$dst"
+ }
+ } &&
+ test -f "$dst" &&
+ cmp -s "$src" "$dst" || {
+ echo "$me: cp -fp $src $dst" &&
+ cp -fp "$src" "$dst"
+ }
+ else
+ # Leave any existing symlink alone, if it already points to the source,
+ # so that broken build tools that care about symlink times
+ # aren't confused into doing unnecessary builds.  Conversely, if the
+ # existing symlink's timestamp is older than the source, make it afresh,
+ # so that broken tools aren't confused into skipping needed builds.  See
+ # <https://lists.gnu.org/r/bug-gnulib/2011-05/msg00326.html>.
+ test -h "$dst" &&
+ src_ls=$(ls -diL "$src" 2>/dev/null) && set $src_ls && src_i=$1 &&
+ dst_ls=$(ls -diL "$dst" 2>/dev/null) && set $dst_ls && dst_i=$1 &&
+ test "$src_i" = "$dst_i" &&
+ both_ls=$(ls -dt "$src" "$dst") &&
+ test "X$both_ls" = "X$dst$nl$src" || {
+ # Compute the ../ prefix needed to reach $src from $dst's directory.
+ dot_dots=
+ case $src in
+ /*) ;;
+ *)
+ case /$dst/ in
+ *//* | */../* | */./* | /*/*/*/*/*/)
+ die "invalid symlink calculation: $src -> $dst";;
+ /*/*/*/*/) dot_dots=../../../;;
+ /*/*/*/) dot_dots=../../;;
+ /*/*/) dot_dots=../;;
+ esac;;
+ esac
+
+ echo "$me: ln -fs $dot_dots$src $dst" &&
+ ln -fs "$dot_dots$src" "$dst"
+ }
+ fi
+ }
+}
+
+# Regenerate all autogeneratable files that are omitted from the
+# version control repository. In particular, regenerate all
+# aclocal.m4, config.h.in, Makefile.in, configure files with new
+# versions of autoconf or automake.
+autogen()
+{
+ # Ensure that CDPATH is not set. Otherwise, the output from cd
+ # would cause trouble in at least one use below.
+ (unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+ # Environment variables that may be set by the user.
+ : "${AUTOPOINT=autopoint}"
+ : "${AUTORECONF=autoreconf}"
+
+ if test "$vc_ignore" = auto; then
+ vc_ignore=
+ test -d .git && vc_ignore=.gitignore
+ test -d CVS && vc_ignore="$vc_ignore .cvsignore"
+ fi
+
+
+ # Parse options.
+
+ # Whether to use copies instead of symlinks.
+ copy=false
+
+ for option
+ do
+ case $option in
+ --help)
+ autogen_usage
+ return;;
+ --version)
+ set -e
+ echo "autogen.sh $scriptlibversion"
+ echo "$copyright"
+ return 0
+ ;;
+ --force)
+ checkout_only_file=;;
+ --copy)
+ copy=true;;
+ *)
+ bootstrap_option_hook $option || die "$option: unknown option";;
+ esac
+ done
+
+ test -z "$GNULIB_SRCDIR" || test -d "$GNULIB_SRCDIR" \
+ || die "Error: \$GNULIB_SRCDIR environment variable or --gnulib-srcdir" \
+ "option is specified, but does not denote a directory"
+
+ if test -n "$checkout_only_file" && test ! -r "$checkout_only_file"; then
+ die "Running this script from a non-checked-out distribution is risky."
+ fi
+
+ if $use_gnulib; then
+ if test -z "$GNULIB_SRCDIR"; then
+ gnulib_path=$(test -f .gitmodules &&
+ git config --file .gitmodules submodule.gnulib.path)
+ test -z "$gnulib_path" && gnulib_path=gnulib
+ GNULIB_SRCDIR=$gnulib_path
+ fi
+ fi
+
+ # Die if there is no AC_CONFIG_AUX_DIR($build_aux) line in configure.ac.
+ found_aux_dir=no
+ grep '^[ ]*AC_CONFIG_AUX_DIR(\['"$build_aux"'])' configure.ac \
+ >/dev/null && found_aux_dir=yes
+ grep '^[ ]*AC_CONFIG_AUX_DIR('"$build_aux"')' configure.ac \
+ >/dev/null && found_aux_dir=yes
+ test $found_aux_dir = yes \
+ || die "configure.ac lacks 'AC_CONFIG_AUX_DIR([$build_aux])'; add it"
+
+ # If $build_aux doesn't exist, create it now, otherwise some bits
+ # below will malfunction. If creating it, also mark it as ignored.
+ if test ! -d $build_aux; then
+ mkdir $build_aux
+ for dot_ig in x $vc_ignore; do
+ test $dot_ig = x && continue
+ insert_vc_ignore $dot_ig $build_aux/
+ done
+ fi
+
+ check_build_prerequisites false
+
+ use_libtool=0
+ # We'd like to use grep -E, to see if any of LT_INIT,
+ # AC_PROG_LIBTOOL, AM_PROG_LIBTOOL is used in configure.ac,
+ # but that's not portable enough (e.g., for Solaris).
+ grep '^[ ]*A[CM]_PROG_LIBTOOL' configure.ac >/dev/null \
+ && use_libtool=1
+ grep '^[ ]*LT_INIT' configure.ac >/dev/null \
+ && use_libtool=1
+ if test $use_libtool = 1; then
+ find_tool LIBTOOLIZE glibtoolize libtoolize
+ fi
+
+ if $use_gnulib; then
+ gnulib_tool=$GNULIB_SRCDIR/gnulib-tool
+ <$gnulib_tool || return
+ fi
+
+ # NOTE: we have to be careful to run both autopoint and libtoolize
+ # before gnulib-tool, since gnulib-tool is likely to provide newer
+ # versions of files "installed" by these two programs.
+ # Then, *after* gnulib-tool (see below), we have to be careful to
+ # run autoreconf in such a way that it does not run either of these
+ # two just-pre-run programs.
+
+ # Import from gettext.
+ with_gettext=yes
+ grep '^[ ]*AM_GNU_GETTEXT_VERSION(' configure.ac >/dev/null || \
+ with_gettext=no
+
+ if test $with_gettext = yes || test $use_libtool = 1; then
+
+ tempbase=.bootstrap$$
+ trap "rm -f $tempbase.0 $tempbase.1" HUP INT PIPE TERM
+
+ > $tempbase.0 > $tempbase.1 &&
+ find . ! -type d -print | sort > $tempbase.0 || return
+
+ if test $with_gettext = yes; then
+ # Released autopoint has the tendency to install macros that have been
+ # obsoleted in current gnulib, so run this before gnulib-tool.
+ echo "$0: $AUTOPOINT --force"
+ $AUTOPOINT --force || return
+ fi
+
+ # Autoreconf runs aclocal before libtoolize, which causes spurious
+ # warnings if the initial aclocal is confused by the libtoolized
+ # (or worse out-of-date) macro directory.
+ # libtoolize 1.9b added the --install option; but we support back
+ # to libtoolize 1.5.22, where the install action was default.
+ if test $use_libtool = 1; then
+ install=
+ case $($LIBTOOLIZE --help) in
+ *--install*) install=--install ;;
+ esac
+ echo "running: $LIBTOOLIZE $install --copy"
+ $LIBTOOLIZE $install --copy
+ fi
+
+ find . ! -type d -print | sort >$tempbase.1
+ old_IFS=$IFS
+ IFS=$nl
+ for file in $(comm -13 $tempbase.0 $tempbase.1); do
+ IFS=$old_IFS
+ parent=${file%/*}
+ version_controlled_file "$parent" "$file" || {
+ for dot_ig in x $vc_ignore; do
+ test $dot_ig = x && continue
+ ig=$parent/$dot_ig
+ insert_vc_ignore "$ig" "${file##*/}"
+ done
+ }
+ done
+ IFS=$old_IFS
+
+ rm -f $tempbase.0 $tempbase.1
+ trap - HUP INT PIPE TERM
+ fi
+
+ # Import from gnulib.
+
+ if $use_gnulib; then
+ gnulib_tool_options="\
+ --no-changelog\
+ --aux-dir=$build_aux\
+ --doc-base=$doc_base\
+ --lib=$gnulib_name\
+ --m4-base=$m4_base/\
+ --source-base=$source_base/\
+ --tests-base=$tests_base\
+ --local-dir=$local_gl_dir\
+ $gnulib_tool_option_extras\
+ "
+ if test $use_libtool = 1; then
+ case "$gnulib_tool_options " in
+ *' --libtool '*) ;;
+ *) gnulib_tool_options="$gnulib_tool_options --libtool" ;;
+ esac
+ fi
+ echo "$0: $gnulib_tool $gnulib_tool_options --import ..."
+ $gnulib_tool $gnulib_tool_options --import $gnulib_modules \
+ || die "gnulib-tool failed"
+
+ for file in $gnulib_files; do
+ symlink_to_dir "$GNULIB_SRCDIR" $file \
+ || die "failed to symlink $file"
+ done
+ fi
+
+ bootstrap_post_import_hook \
+ || die "bootstrap_post_import_hook failed"
+
+ # Remove any dangling symlink matching "*.m4" or "*.[ch]" in some
+ # gnulib-populated directories. Such .m4 files would cause aclocal to fail.
+ # The following requires GNU find 4.2.3 or newer. Considering the usual
+ # portability constraints of this script, that may seem a very demanding
+ # requirement, but it should be ok. Ignore any failure, which is fine,
+ # since this is only a convenience to help developers avoid the relatively
+ # unusual case in which a symlinked-to .m4 file is git-removed from gnulib
+ # between successive runs of this script.
+ find "$m4_base" "$source_base" \
+ -depth \( -name '*.m4' -o -name '*.[ch]' \) \
+ -type l -xtype l -delete > /dev/null 2>&1
+
+ # Invoke autoreconf with --force --install to ensure upgrades of tools
+ # such as ylwrap.
+ AUTORECONFFLAGS="--verbose --install --force -I $m4_base $ACLOCAL_FLAGS"
+ AUTORECONFFLAGS="$AUTORECONFFLAGS --no-recursive"
+
+ # Tell autoreconf not to invoke autopoint or libtoolize; they were run above.
+ echo "running: AUTOPOINT=true LIBTOOLIZE=true $AUTORECONF $AUTORECONFFLAGS"
+ AUTOPOINT=true LIBTOOLIZE=true $AUTORECONF $AUTORECONFFLAGS \
+ || die "autoreconf failed"
+
+ # Get some extra files from gnulib, overriding existing files.
+ for file in $gnulib_extra_files; do
+ case $file in
+ */INSTALL) dst=INSTALL;;
+ build-aux/*) dst=$build_aux/${file#build-aux/};;
+ *) dst=$file;;
+ esac
+ symlink_to_dir "$GNULIB_SRCDIR" $file $dst \
+ || die "failed to symlink $file"
+ done
+
+ if test $with_gettext = yes; then
+ # Create gettext configuration.
+ echo "$0: Creating po/Makevars from po/Makevars.template ..."
+ rm -f po/Makevars
+ sed '
+ /^EXTRA_LOCALE_CATEGORIES *=/s/=.*/= '"$EXTRA_LOCALE_CATEGORIES"'/
+ /^COPYRIGHT_HOLDER *=/s/=.*/= '"$COPYRIGHT_HOLDER"'/
+ /^MSGID_BUGS_ADDRESS *=/s|=.*|= '"$MSGID_BUGS_ADDRESS"'|
+ /^XGETTEXT_OPTIONS *=/{
+ s/$/ \\/
+ a\
+ '"$XGETTEXT_OPTIONS"' $${end_of_xgettext_options+}
+ }
+ ' po/Makevars.template >po/Makevars \
+ || die 'cannot generate po/Makevars'
+
+ # If the 'gettext' module is in use, grab the latest Makefile.in.in.
+ # If only the 'gettext-h' module is in use, assume autopoint already
+ # put the correct version of this file into place.
+ case $gnulib_modules in
+ *gettext-h*) ;;
+ *gettext*)
+ cp $GNULIB_SRCDIR/build-aux/po/Makefile.in.in po/Makefile.in.in \
+ || die "cannot create po/Makefile.in.in"
+ ;;
+ esac
+
+ if test -d runtime-po; then
+ # Similarly for runtime-po/Makevars, but not quite the same.
+ rm -f runtime-po/Makevars
+ sed '
+ /^DOMAIN *=.*/s/=.*/= '"$package"'-runtime/
+ /^subdir *=.*/s/=.*/= runtime-po/
+ /^MSGID_BUGS_ADDRESS *=/s/=.*/= bug-'"$package"'@gnu.org/
+ /^XGETTEXT_OPTIONS *=/{
+ s/$/ \\/
+ a\
+ '"$XGETTEXT_OPTIONS_RUNTIME"' $${end_of_xgettext_options+}
+ }
+ ' po/Makevars.template >runtime-po/Makevars \
+ || die 'cannot generate runtime-po/Makevars'
+
+ # Copy identical files from po to runtime-po.
+ (cd po && cp -p Makefile.in.in *-quot *.header *.sed *.sin ../runtime-po)
+ fi
+ fi
+
+ bootstrap_epilogue
+
+ echo "$0: done. Now you can run './configure'."
+}
+
+# ----------------------------------------------------------------------------
+
+# Local Variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "scriptlibversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC0"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/bootstrap.conf b/bootstrap.conf
new file mode 100644
index 0000000..e9009c5
--- /dev/null
+++ b/bootstrap.conf
@@ -0,0 +1,74 @@
+# Bootstrap configuration. -*- sh -*-
+
+# Copyright (C) 2006-2023 Free Software Foundation, Inc.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+
+# gnulib modules used by this package.
+gnulib_modules="
+stdbool
+git-version-gen
+vc-list-files
+do-release-commit-and-tag
+announce-gen
+"
+
+# Additional xgettext options to use. Use "\\\newline" to break lines.
+XGETTEXT_OPTIONS=$XGETTEXT_OPTIONS'\\\
+ --from-code=UTF-8\\\
+ --flag=asprintf:2:c-format --flag=vasprintf:2:c-format\\\
+ --flag=asnprintf:3:c-format --flag=vasnprintf:3:c-format\\\
+ --flag=wrapf:1:c-format\\\
+'
+
+# If "AM_GNU_GETTEXT(external" or "AM_GNU_GETTEXT([external]"
+# appears in configure.ac, exclude some unnecessary files.
+# Without grep's -E option (not portable enough, pre-configure),
+# the following test is ugly. Also, this depends on the existence
+# of configure.ac, not the obsolescent-named configure.in. But if
+# you're using this infrastructure, you should care about such things.
+
+gettext_external=0
+grep '^[ ]*AM_GNU_GETTEXT(external\>' configure.ac > /dev/null &&
+ gettext_external=1
+grep '^[ ]*AM_GNU_GETTEXT(\[external]' configure.ac > /dev/null &&
+ gettext_external=1
+
+if test $gettext_external = 1; then
+ # Gettext supplies these files, but we don't need them since
+ # we don't have an intl subdirectory.
+ excluded_files='
+ m4/glibc2.m4
+ m4/intdiv0.m4
+ m4/lcmessage.m4
+ m4/lock.m4
+ m4/printf-posix.m4
+ m4/size_max.m4
+ m4/uintmax_t.m4
+ m4/ulonglong.m4
+ m4/visibility.m4
+ m4/xsize.m4
+ '
+fi
+
+# Build prerequisites
+buildreq="\
+autoconf 2.59
+automake 1.9.6
+git 1.5.5
+tar -
+"
+
+checkout_only_file=.git/HEAD
diff --git a/cfg.mk b/cfg.mk
new file mode 100644
index 0000000..48d42d7
--- /dev/null
+++ b/cfg.mk
@@ -0,0 +1,12 @@
+old_NEWS_hash = d41d8cd98f00b204e9800998ecf8427e
+
+list_authors = $(GIT) log --pretty=format:%an
+
+sc_git_authors_known:
+ @$(list_authors) \
+ | sort -u \
+ | while read line ; \
+ do $(GREP) "$$line" $(srcdir)/AUTHORS >/dev/null 2>&1 \
+ || (echo "$$line does not appear in AUTHORS." ; \
+ exit 1) ; \
+ done
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..224342c
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,37 @@
+# -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+AC_PREREQ([2.71])
+AC_INIT([neoas], m4_esyscmd([sh build-aux/git-version-gen .tarball-version]), [vivien@planete-kraus.eu])
+AC_CONFIG_SRCDIR([src/json-lexer.c])
+AC_CONFIG_HEADERS([config.h])
+AC_CONFIG_AUX_DIR([build-aux])
+
+# Checks for programs.
+AC_PROG_CC
+gl_EARLY
+AM_INIT_AUTOMAKE([subdir-objects])
+
+gl_INIT
+
+# Checks for libraries.
+AC_SEARCH_LIBS([pow], [m])
+
+# Checks for header files.
+AC_CHECK_HEADERS([math.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_INLINE
+AC_TYPE_UINT8_T
+AC_C_RESTRICT
+AC_TYPE_SIZE_T
+AC_TYPE_SSIZE_T
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+AC_CHECK_FUNCS([calloc pow])
+
+AC_CONFIG_FILES([Makefile
+ lib/Makefile])
+AC_OUTPUT
diff --git a/gen-bootstrap.sed b/gen-bootstrap.sed
new file mode 100644
index 0000000..434e614
--- /dev/null
+++ b/gen-bootstrap.sed
@@ -0,0 +1,10 @@
+1{
+a\
+# DO NOT EDIT! GENERATED AUTOMATICALLY!
+a\
+
+}
+/^[.] "[$]medir"[/]bootstrap-funclib.sh/{
+ s/^.*$//
+ r top/bootstrap-funclib.sh
+}
diff --git a/gitsub.sh b/gitsub.sh
new file mode 100755
index 0000000..32f2c64
--- /dev/null
+++ b/gitsub.sh
@@ -0,0 +1,509 @@
+#! /bin/sh
+#
+# Copyright (C) 2019-2023 Free Software Foundation, Inc.
+# Written by Bruno Haible <bruno@clisp.org>, 2019.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Program that manages the subdirectories of a git checkout of a package
+# that come from other packages (called "dependency packages").
+#
+# This program is similar in spirit to 'git submodule', with three
+# essential differences:
+#
+# 1) Its options are easy to remember, and do not require knowledge of
+# 'git submodule'.
+#
+# 2) The developer may choose to work on a different checkout for each
+# dependency package. This is important when the developer is
+# preparing simultaneous changes to the package and the dependency
+# package, or is using the dependency package in several packages.
+#
+# The developer indicates this different checkout by setting the
+# environment variable <SUBDIR>_SRCDIR (e.g. GNULIB_SRCDIR) to point to it.
+#
+# 3) The package maintainer may choose to use or not use git submodules.
+#
+# The advantages of management through a git submodule are:
+# - Changes to the dependency package cannot suddenly break your package.
+# In other words, when there is an incompatible change that will cause
+# a breakage, you can fix things at your pace; you are not forced to
+# cope with such breakages in an emergency.
+# - When you need to make a change as a response to a change in the
+# dependency package, your co-developers cannot accidentally mix things
+# up (for example, use a combination of your newest change with an
+# older version of the dependency package).
+#
+# The advantages of management without a git submodule (just as a plain
+# subdirectory, let's call it a "subcheckout") are:
+# - The simplicity: you are conceptually always using the newest revision
+# of the dependency package.
+# - You don't have to remember to periodically upgrade the dependency.
+# Upgrading the dependency is an implicit operation.
+
+# This program is meant to be copied to the top-level directory of the package,
+# together with a configuration file. The configuration is supposed to be
+# named '.gitmodules' and to define:
+# * The git submodules, as described in "man 5 gitmodules" or
+# <https://git-scm.com/docs/gitmodules>. For example:
+#
+# [submodule "gnulib"]
+# url = https://git.savannah.gnu.org/git/gnulib.git
+# path = gnulib
+#
+# You don't add this piece of configuration to .gitmodules manually. Instead,
+# you would invoke
+# $ git submodule add --name "gnulib" -- https://git.savannah.gnu.org/git/gnulib.git gnulib
+#
+# * The subdirectories that are not git submodules, in a similar syntax. For
+# example:
+#
+# [subcheckout "gnulib"]
+# url = https://git.savannah.gnu.org/git/gnulib.git
+# path = gnulib
+#
+# Here the URL is the one used for anonymous checkouts of the dependency
+# package. If the developer needs a checkout with write access, they can
+# either set the GNULIB_SRCDIR environment variable to point to that checkout
+# or modify the gnulib/.git/config file to enter a different URL.
+
+scriptname="$0"
+scriptversion='2019-04-01'
+nl='
+'
+IFS=" "" $nl"
+
+# func_usage
+# outputs to stdout the --help usage message.
+func_usage ()
+{
+ echo "\
+Usage: gitsub.sh pull [SUBDIR]
+ gitsub.sh upgrade [SUBDIR]
+ gitsub.sh checkout SUBDIR REVISION
+
+Operations:
+
+gitsub.sh pull [GIT_OPTIONS] [SUBDIR]
+ You should perform this operation after 'git clone ...' and after
+ every 'git pull'.
+ It brings your checkout in sync with what the other developers of
+ your package have committed and pushed.
+ If an environment variable <SUBDIR>_SRCDIR is set, with a non-empty
+ value, nothing is done for this SUBDIR.
+ Supported GIT_OPTIONS (for expert git users) are:
+ --reference <repository>
+ --depth <depth>
+ --recursive
+ If no SUBDIR is specified, the operation applies to all dependencies.
+
+gitsub.sh upgrade [SUBDIR]
+ You should perform this operation periodically, to ensure currency
+ of the dependency package revisions that you use.
+ This operation pulls and checks out the changes that the developers
+ of the dependency package have committed and pushed.
+ If an environment variable <SUBDIR>_SRCDIR is set, with a non-empty
+ value, nothing is done for this SUBDIR.
+ If no SUBDIR is specified, the operation applies to all dependencies.
+
+gitsub.sh checkout SUBDIR REVISION
+ Checks out a specific revision for a dependency package.
+ If an environment variable <SUBDIR>_SRCDIR is set, with a non-empty
+ value, this operation fails.
+
+This script requires the git program in the PATH and an internet connection.
+"
+}
+
+# func_version
+# outputs to stdout the --version message.
+func_version ()
+{
+ year=`echo "$scriptversion" | sed -e 's/^\(....\)-.*/\1/'`
+ echo "\
+gitsub.sh (GNU gnulib) $scriptversion
+Copyright (C) 2019-$year Free Software Foundation, Inc.
+License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+"
+ printf "Written by %s.\n" "Bruno Haible"
+}
+
+# func_fatal_error message
+# outputs to stderr a fatal error message, and terminates the program.
+# Input:
+# - scriptname name of this program
+func_fatal_error ()
+{
+ echo "$scriptname: *** $1" 1>&2
+ echo "$scriptname: *** Stop." 1>&2
+ exit 1
+}
+
+# func_warning message
+# Outputs to stderr a warning message,
+func_warning ()
+{
+ echo "gitsub.sh: warning: $1" 1>&2
+}
+
+# func_note message
+# Outputs to stdout a note message,
+func_note ()
+{
+ echo "gitsub.sh: note: $1"
+}
+
+# Unset CDPATH. Otherwise, output from 'cd dir' can surprise callers.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Command-line option processing.
+mode=
+while test $# -gt 0; do
+ case "$1" in
+ --help | --hel | --he | --h )
+ func_usage
+ exit $? ;;
+ --version | --versio | --versi | --vers | --ver | --ve | --v )
+ func_version
+ exit $? ;;
+ -- )
+ # Stop option processing
+ shift
+ break ;;
+ -* )
+ echo "gitsub.sh: unknown option $1" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+if test $# = 0; then
+ echo "gitsub.sh: missing operation argument" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1
+fi
+case "$1" in
+ pull | upgrade | checkout )
+ mode="$1"
+ shift ;;
+ *)
+ echo "gitsub.sh: unknown operation '$1'" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1 ;;
+esac
+if { test $mode = upgrade && test $# -gt 1; } \
+ || { test $mode = checkout && test $# -gt 2; }; then
+ echo "gitsub.sh: too many arguments in '$mode' mode" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1
+fi
+if test $# = 0 && test $mode = checkout; then
+ echo "gitsub.sh: too few arguments in '$mode' mode" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1
+fi
+
+# Read the configuration.
+# Output:
+# - subcheckout_names space-separated list of subcheckout names
+# - submodule_names space-separated list of submodule names
+if test -f .gitmodules; then
+ subcheckout_names=`git config --file .gitmodules --get-regexp --name-only 'subcheckout\..*\.url' | sed -e 's/^subcheckout\.//' -e 's/\.url$//' | tr -d '\r' | tr '\n' ' '`
+ submodule_names=`git config --file .gitmodules --get-regexp --name-only 'submodule\..*\.url' | sed -e 's/^submodule\.//' -e 's/\.url$//' | tr -d '\r' | tr '\n' ' '`
+else
+ subcheckout_names=
+ submodule_names=
+fi
+
+# func_validate SUBDIR
+# Verifies that the state on the file system is in sync with the declarations
+# in the configuration file.
+# Input:
+# - subcheckout_names space-separated list of subcheckout names
+# - submodule_names space-separated list of submodule names
+# Output:
+# - srcdirvar Environment that the user can set
+# - srcdir Value of the environment variable
+# - path if $srcdir = "": relative path of the subdirectory
+# - needs_init if $srcdir = "" and $path is not yet initialized:
+# true
+# - url if $srcdir = "" and $path is not yet initialized:
+# the repository URL
+func_validate ()
+{
+ srcdirvar=`echo "$1" | LC_ALL=C sed -e 's/[^a-zA-Z0-9]/_/g' | LC_ALL=C tr '[a-z]' '[A-Z]'`"_SRCDIR"
+ eval 'srcdir=$'"$srcdirvar"
+ path=
+ url=
+ if test -n "$srcdir"; then
+ func_note "Ignoring '$1' because $srcdirvar is set."
+ else
+ found=false
+ needs_init=
+ case " $subcheckout_names " in *" $1 "*)
+ found=true
+ # It ought to be a subcheckout.
+ path=`git config --file .gitmodules "subcheckout.$1.path"`
+ if test -z "$path"; then
+ path="$1"
+ fi
+ if test -d "$path"; then
+ if test -d "$path/.git"; then
+ # It's a plain checkout.
+ :
+ else
+ if test -f "$path/.git"; then
+ # It's a submodule.
+ func_fatal_error "Subdirectory '$path' is supposed to be a plain checkout, but it is a submodule."
+ else
+ func_warning "Ignoring '$path' because it exists but is not a git checkout."
+ fi
+ fi
+ else
+ # The subdir does not yet exist.
+ needs_init=true
+ url=`git config --file .gitmodules "subcheckout.$1.url"`
+ if test -z "$url"; then
+ func_fatal_error "Property subcheckout.$1.url is not defined in .gitmodules"
+ fi
+ fi
+ ;;
+ esac
+ case " $submodule_names " in *" $1 "*)
+ found=true
+ # It ought to be a submodule.
+ path=`git config --file .gitmodules "submodule.$1.path"`
+ if test -z "$path"; then
+ path="$1"
+ fi
+ if test -d "$path"; then
+ if test -d "$path/.git" || test -f "$path/.git"; then
+ # It's likely a submodule.
+ :
+ else
+ path_if_empty=`find "$path" -prune -empty 2>/dev/null`
+ if test -n "$path_if_empty"; then
+ # The subdir is empty.
+ needs_init=true
+ else
+ # The subdir is not empty.
+ # It is important to report an error, because we don't want to erase
+ # the user's files and 'git submodule update gnulib' sometimes reports
+ # "fatal: destination path '$path' already exists and is not an empty directory."
+ # but sometimes does not.
+ func_fatal_error "Subdir '$path' exists but is not a git checkout."
+ fi
+ fi
+ else
+ # The subdir does not yet exist.
+ needs_init=true
+ fi
+ # Another way to determine needs_init could be:
+ # if git submodule status "$path" | grep '^-' > /dev/null; then
+ # needs_init=true
+ # fi
+ if test -n "$needs_init"; then
+ url=`git config --file .gitmodules "submodule.$1.url"`
+ if test -z "$url"; then
+ func_fatal_error "Property submodule.$1.url is not defined in .gitmodules"
+ fi
+ fi
+ ;;
+ esac
+ if ! $found; then
+ func_fatal_error "Subdir '$1' is not configured as a subcheckout or a submodule in .gitmodules"
+ fi
+ fi
+}
+
+# func_cleanup_current_git_clone
+# Cleans up the current 'git clone' operation.
+# Input:
+# - path
+func_cleanup_current_git_clone ()
+{
+ rm -rf "$path"
+ func_fatal_error "git clone failed"
+}
+
+# func_pull SUBDIR GIT_OPTIONS
+# Implements the 'pull' operation.
+func_pull ()
+{
+ func_validate "$1"
+ if test -z "$srcdir"; then
+ case " $subcheckout_names " in *" $1 "*)
+ # It's a subcheckout.
+ if test -d "$path"; then
+ if test -d "$path/.git"; then
+ (cd "$path" && git pull) || func_fatal_error "git operation failed"
+ fi
+ else
+ # The subdir does not yet exist. Create a plain checkout.
+ trap func_cleanup_current_git_clone HUP INT PIPE TERM
+ git clone $2 "$url" "$path" || func_cleanup_current_git_clone
+ trap - HUP INT PIPE TERM
+ fi
+ ;;
+ esac
+ case " $submodule_names " in *" $1 "*)
+ # It's a submodule.
+ if test -n "$needs_init"; then
+ # Create a submodule checkout.
+ git submodule init -- "$path" && git submodule update $2 -- "$path" || func_fatal_error "git operation failed"
+ else
+ # See https://stackoverflow.com/questions/1030169/easy-way-to-pull-latest-of-all-git-submodules
+ # https://stackoverflow.com/questions/4611512/is-there-a-way-to-make-git-pull-automatically-update-submodules
+ git submodule update "$path" || func_fatal_error "git operation failed"
+ fi
+ ;;
+ esac
+ fi
+}
+
+# func_upgrade SUBDIR
+# Implements the 'upgrade' operation.
+func_upgrade ()
+{
+ func_validate "$1"
+ if test -z "$srcdir"; then
+ if test -d "$path"; then
+ case " $subcheckout_names " in *" $1 "*)
+ # It's a subcheckout.
+ if test -d "$path/.git"; then
+ (cd "$path" && git pull) || func_fatal_error "git operation failed"
+ fi
+ ;;
+ esac
+ case " $submodule_names " in *" $1 "*)
+ # It's a submodule.
+ if test -z "$needs_init"; then
+ (cd "$path" && git fetch && git merge origin/master) || func_fatal_error "git operation failed"
+ fi
+ ;;
+ esac
+ else
+ # The subdir does not yet exist.
+ func_fatal_error "Subdirectory '$path' does not exist yet. Use 'gitsub.sh pull' to create it."
+ fi
+ fi
+}
+
+# func_checkout SUBDIR REVISION
+# Implements the 'checkout' operation.
+func_checkout ()
+{
+ func_validate "$1"
+ if test -z "$srcdir"; then
+ if test -d "$path"; then
+ case " $subcheckout_names " in *" $1 "*)
+ # It's a subcheckout.
+ if test -d "$path/.git"; then
+ (cd "$path" && git checkout "$2") || func_fatal_error "git operation failed"
+ fi
+ ;;
+ esac
+ case " $submodule_names " in *" $1 "*)
+ # It's a submodule.
+ if test -z "$needs_init"; then
+ (cd "$path" && git checkout "$2") || func_fatal_error "git operation failed"
+ fi
+ ;;
+ esac
+ else
+ # The subdir does not yet exist.
+ func_fatal_error "Subdirectory '$path' does not exist yet. Use 'gitsub.sh pull' to create it."
+ fi
+ fi
+}
+
+case "$mode" in
+ pull )
+ git_options=""
+ while test $# -gt 0; do
+ case "$1" in
+ --reference=* | --depth=* | --recursive)
+ git_options="$git_options $1"
+ shift
+ ;;
+ --reference | --depth)
+ git_options="$git_options $1 $2"
+ shift; shift
+ ;;
+ *)
+ break
+ ;;
+ esac
+ done
+ if test $# -gt 1; then
+ echo "gitsub.sh: too many arguments in '$mode' mode" 1>&2
+ echo "Try 'gitsub.sh --help' for more information." 1>&2
+ exit 1
+ fi
+ if test $# = 0; then
+ for sub in $subcheckout_names $submodule_names; do
+ func_pull "$sub" "$git_options"
+ done
+ else
+ valid=false
+ for sub in $subcheckout_names $submodule_names; do
+ if test "$sub" = "$1"; then
+ valid=true
+ fi
+ done
+ if $valid; then
+ func_pull "$1" "$git_options"
+ else
+ func_fatal_error "Subdir '$1' is not configured as a subcheckout or a submodule in .gitmodules"
+ fi
+ fi
+ ;;
+
+ upgrade )
+ if test $# = 0; then
+ for sub in $subcheckout_names $submodule_names; do
+ func_upgrade "$sub"
+ done
+ else
+ valid=false
+ for sub in $subcheckout_names $submodule_names; do
+ if test "$sub" = "$1"; then
+ valid=true
+ fi
+ done
+ if $valid; then
+ func_upgrade "$1"
+ else
+ func_fatal_error "Subdir '$1' is not configured as a subcheckout or a submodule in .gitmodules"
+ fi
+ fi
+ ;;
+
+ checkout )
+ valid=false
+ for sub in $subcheckout_names $submodule_names; do
+ if test "$sub" = "$1"; then
+ valid=true
+ fi
+ done
+ if $valid; then
+ func_checkout "$1" "$2"
+ else
+ func_fatal_error "Subdir '$1' is not configured as a subcheckout or a submodule in .gitmodules"
+ fi
+ ;;
+esac
diff --git a/gnulib b/gnulib
new file mode 160000
+Subproject 6674810815912a828bd9a509e0041ee203c8848
diff --git a/json-elements/Makefile.am b/json-elements/Makefile.am
new file mode 100644
index 0000000..dcd410f
--- /dev/null
+++ b/json-elements/Makefile.am
@@ -0,0 +1,9 @@
+EXTRA_DIST += \
+ %D%/false \
+ %D%/json \
+ %D%/jsonld \
+ %D%/null \
+ %D%/number \
+ %D%/string \
+ %D%/structure \
+ %D%/true
diff --git a/json-elements/false b/json-elements/false
new file mode 100644
index 0000000..c508d53
--- /dev/null
+++ b/json-elements/false
@@ -0,0 +1 @@
+false
diff --git a/json-elements/json b/json-elements/json
new file mode 100644
index 0000000..fc740b3
--- /dev/null
+++ b/json-elements/json
@@ -0,0 +1,4 @@
+{
+ "hello": "world",
+ "how": ["are", "you", "?"]
+}
diff --git a/json-elements/jsonld b/json-elements/jsonld
new file mode 100644
index 0000000..be0e711
--- /dev/null
+++ b/json-elements/jsonld
@@ -0,0 +1,38 @@
+{
+ "@context": {
+ "gr": "http://purl.org/goodrelations/v1#",
+ "pto": "http://www.productontology.org/id/",
+ "foaf": "http://xmlns.com/foaf/0.1/",
+ "xsd": "http://www.w3.org/2001/XMLSchema#",
+ "foaf:page": {
+ "@type": "@id"
+ },
+ "gr:acceptedPaymentMethods": {
+ "@type": "@id"
+ },
+ "gr:hasBusinessFunction": {
+ "@type": "@id"
+ },
+ "gr:hasCurrencyValue": {
+ "@type": "xsd:float"
+ }
+ },
+ "@id": "http://example.org/cars/for-sale#tesla",
+ "@type": "gr:Offering",
+ "gr:name": "Used Tesla Roadster",
+ "gr:description": "Need to sell fast and furiously",
+ "gr:hasBusinessFunction": "gr:Sell",
+ "gr:acceptedPaymentMethods": "gr:Cash",
+ "gr:hasPriceSpecification": {
+ "gr:hasCurrencyValue": "85000",
+ "gr:hasCurrency": "USD"
+ },
+ "gr:includes": {
+ "@type": [
+ "gr:Individual",
+ "pto:Vehicle"
+ ],
+ "gr:name": "Tesla Roadster",
+ "foaf:page": "http://www.teslamotors.com/roadster"
+ }
+}
diff --git a/json-elements/null b/json-elements/null
new file mode 100644
index 0000000..19765bd
--- /dev/null
+++ b/json-elements/null
@@ -0,0 +1 @@
+null
diff --git a/json-elements/number b/json-elements/number
new file mode 100644
index 0000000..e3579cf
--- /dev/null
+++ b/json-elements/number
@@ -0,0 +1 @@
+-42.13e-4
diff --git a/json-elements/string b/json-elements/string
new file mode 100644
index 0000000..99e20f9
--- /dev/null
+++ b/json-elements/string
@@ -0,0 +1 @@
+"hello" "hello\n" "hello\u0074"
diff --git a/json-elements/structure b/json-elements/structure
new file mode 100644
index 0000000..02ac7c5
--- /dev/null
+++ b/json-elements/structure
@@ -0,0 +1 @@
+{}[]:,
diff --git a/json-elements/true b/json-elements/true
new file mode 100644
index 0000000..27ba77d
--- /dev/null
+++ b/json-elements/true
@@ -0,0 +1 @@
+true
diff --git a/maint.mk b/maint.mk
new file mode 100644
index 0000000..02e6546
--- /dev/null
+++ b/maint.mk
@@ -0,0 +1,1805 @@
+# -*-Makefile-*-
+# This Makefile fragment tries to be general-purpose enough to be
+# used by many projects via the gnulib maintainer-makefile module.
+
+## Copyright (C) 2001-2023 Free Software Foundation, Inc.
+##
+## This program is free software: you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation, either version 3 of the License, or
+## (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# This is reported not to work with make-3.79.1
+# ME := $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
+ME := maint.mk
+
+# These variables ought to be defined through the configure.ac section
+# of the module description. But some packages import this file directly,
+# ignoring the module description.
+AWK ?= awk
+GREP ?= grep
+SED ?= sed
+
+# Helper variables.
+_empty =
+_sp = $(_empty) $(_empty)
+
+# _equal,S1,S2
+# ------------
+# If S1 == S2, return S1, otherwise the empty string.
+_equal = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# member-check,VARIABLE,VALID-VALUES
+# ----------------------------------
+# Check that $(VARIABLE) is in the space-separated list of VALID-VALUES, and
+# return it. Die otherwise.
+member-check = \
+ $(strip \
+ $(if $($(1)), \
+ $(if $(findstring $(_sp),$($(1))), \
+ $(error invalid $(1): '$($(1))', expected $(2)), \
+ $(or $(findstring $(_sp)$($(1))$(_sp),$(_sp)$(2)$(_sp)), \
+ $(error invalid $(1): '$($(1))', expected $(2)))), \
+ $(error $(1) undefined)))
+
+# Do not save the original name or timestamp in the .tar.gz file.
+# Use --rsyncable if available.
+gzip_rsyncable := \
+ $(shell gzip --help 2>/dev/null|$(GREP) rsyncable >/dev/null \
+ && printf %s --rsyncable)
+GZIP_ENV = '--no-name --best $(gzip_rsyncable)'
+
+GIT = git
+VC = $(GIT)
+
+VC_LIST = $(srcdir)/$(_build-aux)/vc-list-files -C $(srcdir)
+
+# You can override this variable in cfg.mk if your gnulib submodule lives
+# in a different location.
+gnulib_dir ?= $(shell if test -n "$(GNULIB_SRCDIR)" && test -f "$(GNULIB_SRCDIR)/gnulib-tool"; then \
+ echo "$(GNULIB_SRCDIR)"; \
+ else \
+ echo $(srcdir)/gnulib; \
+ fi)
+
+# You can override this variable in cfg.mk to set your own regexp
+# matching files to ignore.
+VC_LIST_ALWAYS_EXCLUDE_REGEX ?= ^$$
+
+# This is to preprocess robustly the output of $(VC_LIST), so that even
+# when $(srcdir) is a pathological name like "....", the leading sed command
+# removes only the intended prefix.
+_dot_escaped_srcdir = $(subst .,\.,$(srcdir))
+
+# Post-process $(VC_LIST) output, prepending $(srcdir)/, but only
+# when $(srcdir) is not ".".
+ifeq ($(srcdir),.)
+ _prepend_srcdir_prefix =
+else
+ _prepend_srcdir_prefix = | $(SED) 's|^|$(srcdir)/|'
+endif
+
+# In order to be able to consistently filter "."-relative names,
+# (i.e., with no $(srcdir) prefix), this definition is careful to
+# remove any $(srcdir) prefix, and to restore what it removes.
+_sc_excl = \
+ $(or $(exclude_file_name_regexp--$@),^$$)
+VC_LIST_EXCEPT = \
+ $(VC_LIST) | $(SED) 's|^$(_dot_escaped_srcdir)/||' \
+ | if test -f $(srcdir)/.x-$@; then $(GREP) -vEf $(srcdir)/.x-$@; \
+ else $(GREP) -Ev -e "$${VC_LIST_EXCEPT_DEFAULT-ChangeLog}"; fi \
+ | $(GREP) -Ev -e '($(VC_LIST_ALWAYS_EXCLUDE_REGEX)|$(_sc_excl))' \
+ $(_prepend_srcdir_prefix)
+
+ifeq ($(origin prev_version_file), undefined)
+ prev_version_file = $(srcdir)/.prev-version
+endif
+
+PREV_VERSION := $(shell cat $(prev_version_file) 2>/dev/null)
+VERSION_REGEXP = $(subst .,\.,$(VERSION))
+PREV_VERSION_REGEXP = $(subst .,\.,$(PREV_VERSION))
+
+ifeq ($(VC),$(GIT))
+ this-vc-tag = v$(VERSION)
+ this-vc-tag-regexp = v$(VERSION_REGEXP)
+else
+ tag-package = $(shell echo "$(PACKAGE)" | tr '[:lower:]' '[:upper:]')
+ tag-this-version = $(subst .,_,$(VERSION))
+ this-vc-tag = $(tag-package)-$(tag-this-version)
+ this-vc-tag-regexp = $(this-vc-tag)
+endif
+my_distdir = $(PACKAGE)-$(VERSION)
+
+# Old releases are stored here.
+release_archive_dir ?= ../release
+
+# If RELEASE_TYPE is undefined, but RELEASE is, use its second word.
+# But overwrite VERSION.
+ifdef RELEASE
+ VERSION := $(word 1, $(RELEASE))
+ RELEASE_TYPE ?= $(word 2, $(RELEASE))
+endif
+
+# Validate and return $(RELEASE_TYPE), or die.
+RELEASE_TYPES = alpha beta stable
+release-type = $(call member-check,RELEASE_TYPE,$(RELEASE_TYPES))
+
+# Override gnu_rel_host and url_dir_list in cfg.mk if these are not right.
+# Use alpha.gnu.org for alpha and beta releases.
+# Use ftp.gnu.org for stable releases.
+gnu_ftp_host-alpha = alpha.gnu.org
+gnu_ftp_host-beta = alpha.gnu.org
+gnu_ftp_host-stable = ftp.gnu.org
+gnu_rel_host ?= $(gnu_ftp_host-$(release-type))
+
+url_dir_list ?= $(if $(call _equal,$(gnu_rel_host),ftp.gnu.org), \
+ https://ftpmirror.gnu.org/$(PACKAGE), \
+ https://$(gnu_rel_host)/gnu/$(PACKAGE))
+
+# Override this in cfg.mk if you are using a different format in your
+# NEWS file.
+today = $(shell date +%Y-%m-%d)
+
+# Select which lines of NEWS are searched for $(news-check-regexp).
+# This is a sed line number spec. The default says that we search
+# lines 1..10 of NEWS for $(news-check-regexp).
+# If you want to search only line 3 or only lines 20-22, use "3" or "20,22".
+news-check-lines-spec ?= 1,10
+news-check-regexp ?= '^\*.* $(VERSION_REGEXP) \($(today)\)'
+
+# Prevent programs like 'sort' from considering distinct strings to be equal.
+# Doing it here saves us from having to set LC_ALL elsewhere in this file.
+export LC_ALL = C
+
+## --------------- ##
+## Sanity checks. ##
+## --------------- ##
+
+ifneq ($(_gl-Makefile),)
+_cfg_mk := $(wildcard $(srcdir)/cfg.mk)
+
+# Collect the names of rules starting with 'sc_'.
+syntax-check-rules := $(sort $(shell env LC_ALL=C $(SED) -n \
+ 's/^\(sc_[a-zA-Z0-9_-]*\):.*/\1/p' $(srcdir)/$(ME) $(_cfg_mk)))
+.PHONY: $(syntax-check-rules)
+
+ifeq ($(shell $(VC_LIST) >/dev/null 2>&1; echo $$?),0)
+ local-checks-available += $(syntax-check-rules)
+else
+ local-checks-available += no-vc-detected
+no-vc-detected:
+ @echo "No version control files detected; skipping syntax check"
+endif
+.PHONY: $(local-checks-available)
+
+# Arrange to print the name of each syntax-checking rule just before running it.
+$(syntax-check-rules): %: %.m
+sc_m_rules_ = $(patsubst %, %.m, $(syntax-check-rules))
+.PHONY: $(sc_m_rules_)
+$(sc_m_rules_):
+ @echo $(patsubst sc_%.m, %, $@)
+ @date +%s.%N > .sc-start-$(basename $@)
+
+# Compute and print the elapsed time for each syntax-check rule.
+sc_z_rules_ = $(patsubst %, %.z, $(syntax-check-rules))
+.PHONY: $(sc_z_rules_)
+$(sc_z_rules_): %.z: %
+ @end=$$(date +%s.%N); \
+ start=$$(cat .sc-start-$*); \
+ rm -f .sc-start-$*; \
+ $(AWK) -v s=$$start -v e=$$end \
+ 'END {printf "%.2f $(patsubst sc_%,%,$*)\n", e - s}' < /dev/null
+
+# The patsubst here is to replace each sc_% rule with its sc_%.z wrapper
+# that computes and prints elapsed time.
+local-check := \
+ $(patsubst sc_%, sc_%.z, \
+ $(filter-out $(local-checks-to-skip), $(local-checks-available)))
+
+syntax-check: $(local-check)
+endif
+
+# _sc_search_regexp
+#
+# This macro searches for a given construct in the selected files and
+# then takes some action.
+#
+# Parameters (shell variables):
+#
+# prohibit | require
+#
+# Regular expression (ERE) denoting either a forbidden construct
+# or a required construct. Those arguments are exclusive.
+#
+# exclude
+#
+# Regular expression (ERE) denoting lines to ignore that matched
+# a prohibit construct. For example, this can be used to exclude
+# comments that mention why the nearby code uses an alternative
+# construct instead of the simpler prohibited construct.
+#
+# in_vc_files | in_files
+#
+# grep-E-style regexp selecting the files to check. For in_vc_files,
+# the regexp is used to select matching files from the list of all
+# version-controlled files; for in_files, it's from the names printed
+# by "find $(srcdir)". When neither is specified, use all files that
+# are under version control.
+#
+# containing | non_containing
+#
+# Select the files (non) containing strings matching this regexp.
+# If both arguments are specified then CONTAINING takes
+# precedence.
+#
+# with_grep_options
+#
+# Extra options for grep.
+#
+# ignore_case
+#
+# Ignore case.
+#
+# halt
+#
+#   Message to display before halting execution.
+#
+# Finally, you may exempt files based on an ERE matching file names.
+# For example, to exempt from the sc_space_tab check all files with the
+# .diff suffix, set this Make variable:
+#
+# exclude_file_name_regexp--sc_space_tab = \.diff$
+#
+# Note that while this functionality is mostly inherited via VC_LIST_EXCEPT,
+# when filtering by name via in_files, we explicitly filter out matching
+# names here as well.
+
+# Initialize each, so that envvar settings cannot interfere.
+export require =
+export prohibit =
+export exclude =
+export in_vc_files =
+export in_files =
+export containing =
+export non_containing =
+export halt =
+export with_grep_options =
+
+# By default, _sc_search_regexp does not ignore case.
+export ignore_case =
+_ignore_case = $$(test -n "$$ignore_case" && printf %s -i || :)
+
+define _sc_say_and_exit
+ dummy=; : so we do not need a semicolon before each use; \
+ { printf '%s\n' "$(ME): $$msg" 1>&2; exit 1; };
+endef
+
+define _sc_search_regexp
+ dummy=; : so we do not need a semicolon before each use; \
+ \
+ : Check arguments; \
+ test -n "$$prohibit" && test -n "$$require" \
+ && { msg='Cannot specify both prohibit and require' \
+ $(_sc_say_and_exit) } || :; \
+ test -z "$$prohibit" && test -z "$$require" \
+ && { msg='Should specify either prohibit or require' \
+ $(_sc_say_and_exit) } || :; \
+ test -z "$$prohibit" && test -n "$$exclude" \
+ && { msg='Use of exclude requires a prohibit pattern' \
+ $(_sc_say_and_exit) } || :; \
+ test -n "$$in_vc_files" && test -n "$$in_files" \
+ && { msg='Cannot specify both in_vc_files and in_files' \
+ $(_sc_say_and_exit) } || :; \
+ test "x$$halt" != x \
+ || { msg='halt not defined' $(_sc_say_and_exit) }; \
+ \
+ : Filter by file name; \
+ if test -n "$$in_files"; then \
+ files=$$(find $(srcdir) | $(GREP) -E "$$in_files" \
+ | $(GREP) -Ev '$(_sc_excl)'); \
+ else \
+ files=$$($(VC_LIST_EXCEPT)); \
+ if test -n "$$in_vc_files"; then \
+ files=$$(echo "$$files" | $(GREP) -E "$$in_vc_files"); \
+ fi; \
+ fi; \
+ \
+ : Filter by content; \
+ test -n "$$files" \
+ && test -n "$$containing" \
+ && { files=$$(echo "$$files" | xargs $(GREP) -l "$$containing"); } \
+ || :; \
+ test -n "$$files" \
+ && test -n "$$non_containing" \
+ && { files=$$(echo "$$files" | xargs $(GREP) -vl "$$non_containing"); } \
+ || :; \
+ \
+ : Check for the construct; \
+ if test -n "$$files"; then \
+ if test -n "$$prohibit"; then \
+ echo "$$files" \
+ | xargs $(GREP) $$with_grep_options $(_ignore_case) -nE \
+ "$$prohibit" /dev/null \
+ | $(GREP) -vE "$${exclude:-^$$}" \
+ && { msg="$$halt" $(_sc_say_and_exit) } \
+ || :; \
+ else \
+ echo "$$files" \
+ | xargs \
+ $(GREP) $$with_grep_options $(_ignore_case) -LE "$$require" \
+ | $(GREP) . \
+ && { msg="$$halt" $(_sc_say_and_exit) } \
+ || :; \
+ fi \
+ else :; \
+ fi || :;
+endef
+
+sc_avoid_if_before_free:
+ @$(VC_LIST_EXCEPT) \
+ | $(GREP) -v useless-if-before-free \
+ | xargs \
+ $(srcdir)/$(_build-aux)/useless-if-before-free \
+ $(useless_free_options) \
+ && { printf '$(ME): found useless "if"' \
+ ' before "free" above\n' 1>&2; \
+ exit 1; } \
+ || :
+
+sc_cast_of_argument_to_free:
+ @prohibit='\<free *\( *\(' halt="don't cast free argument" \
+ $(_sc_search_regexp)
+
+sc_cast_of_x_alloc_return_value:
+ @prohibit='\*\) *x(m|c|re)alloc\>' \
+ halt="don't cast x*alloc return value" \
+ $(_sc_search_regexp)
+
+sc_cast_of_alloca_return_value:
+ @prohibit='\*\) *alloca\>' \
+ halt="don't cast alloca return value" \
+ $(_sc_search_regexp)
+
+sc_space_tab:
+ @prohibit='[ ] ' \
+ halt='found SPACE-TAB sequence; remove the SPACE' \
+ $(_sc_search_regexp)
+
+# Don't use *scanf or the old ato* functions in "real" code.
+# They provide no error checking mechanism.
+# Instead, use strto* functions.
+sc_prohibit_atoi_atof:
+ @prohibit='\<([fs]?scanf|ato([filq]|ll)) *\(' \
+ halt='do not use *scan''f, ato''f, ato''i, ato''l, ato''ll or ato''q' \
+ $(_sc_search_regexp)
+
+# Use STREQ rather than comparing strcmp == 0, or != 0.
+sp_ = strcmp *\(.+\)
+sc_prohibit_strcmp:
+ @prohibit='! *strcmp *\(|\<$(sp_) *[!=]=|[!=]= *$(sp_)' \
+ exclude='# *define STRN?EQ\(' \
+ halt='replace strcmp calls above with STREQ/STRNEQ' \
+ $(_sc_search_regexp)
+
+# Really. You don't want to use this function.
+# It may fail to NUL-terminate the destination,
+# and always NUL-pads out to the specified length.
+sc_prohibit_strncpy:
+ @prohibit='\<strncpy *\(' \
+ halt='do not use strncpy, period' \
+ $(_sc_search_regexp)
+
+# Pass EXIT_*, not number, to usage, exit, and error (when exiting)
+# Convert all uses automatically, via these two commands:
+# git grep -l '\<exit *(1)' \
+# | grep -vEf .x-sc_prohibit_magic_number_exit \
+# | xargs --no-run-if-empty \
+# perl -pi -e 's/(^|[^.])\b(exit ?)\(1\)/$1$2(EXIT_FAILURE)/'
+# git grep -l '\<exit *(0)' \
+# | grep -vEf .x-sc_prohibit_magic_number_exit \
+# | xargs --no-run-if-empty \
+# perl -pi -e 's/(^|[^.])\b(exit ?)\(0\)/$1$2(EXIT_SUCCESS)/'
+sc_prohibit_magic_number_exit:
+ @prohibit='(^|[^.])\<(usage|exit|error) ?\(-?[0-9]+[,)]' \
+ exclude='exit \(77\)|error ?\(((0|77),|[^,]*)' \
+ halt='use EXIT_* values rather than magic number' \
+ $(_sc_search_regexp)
+
+# Check that we don't use $< in non-implicit Makefile rules.
+#
+# To find the Makefiles, trace AC_CONFIG_FILES. Using VC_LIST would
+# miss the Makefiles that are not under VC control (e.g., symlinks
+# installed for gettext). "Parsing" (recursive) uses of SUBDIRS seems
+# too delicate.
+#
+# Use GNU Make's --print-data-base to normalize the rules into some
+# easy to parse format: they are separated by two \n. Look for the
+# "section" about non-pattern rules (marked with "# Files") inside
+# which there are still the POSIX Make like implicit rules (".c.o").
+sc_prohibit_gnu_make_extensions_awk_ = \
+ BEGIN { \
+ RS = "\n\n"; \
+ in_rules = 0; \
+ } \
+ /^\# Files/ { \
+ in_rules = 1; \
+ } \
+ /\$$</ && in_rules && $$0 !~ /^(.*\n)*\.\w+(\.\w+)?:/ { \
+ print "Error: " file ": $$< in a non implicit rule\n" $$0; \
+ status = 1; \
+ } \
+ END { \
+ exit status; \
+ }
+sc_prohibit_gnu_make_extensions:
+ @if $(AWK) --version | grep GNU >/dev/null 2>&1; then \
+ (cd $(srcdir) && autoconf --trace AC_CONFIG_FILES:'$$1') | \
+ tr ' ' '\n' | \
+ $(SED) -ne '/Makefile/{s/\.in$$//;p;}' | \
+ while read m; do \
+ $(MAKE) -qp -f $$m .DUMMY-TARGET 2>/dev/null | \
+ $(AWK) -v file=$$m -e '$($@_awk_)' || exit 1; \
+ done; \
+ fi
+
+# Using EXIT_SUCCESS as the first argument to error is misleading,
+# since when that parameter is 0, error does not exit. Use '0' instead.
+sc_error_exit_success:
+ @prohibit='error *\(EXIT_SUCCESS,' \
+ in_vc_files='\.[chly]$$' \
+ halt='found error (EXIT_SUCCESS' \
+ $(_sc_search_regexp)
+
+# "FATAL:" should be fully upper-cased in error messages
+# "WARNING:" should be fully upper-cased, or fully lower-cased
+sc_error_message_warn_fatal:
+ @$(VC_LIST_EXCEPT) \
+ | xargs $(GREP) -nEA2 '[^rp]error *\(' /dev/null \
+ | $(GREP) -E '"Warning|"Fatal|"fatal' \
+ && { echo '$(ME): use FATAL, WARNING or warning' 1>&2; \
+ exit 1; } \
+ || :
+
+# Error messages should not start with a capital letter
+sc_error_message_uppercase:
+ @$(VC_LIST_EXCEPT) \
+ | xargs $(GREP) -nEA2 '[^rp]error *\(' /dev/null \
+ | $(GREP) -E '"[A-Z]' \
+ | $(GREP) -vE '"FATAL|"WARNING|"Java|"C#|"PRI' \
+ && { echo '$(ME): found capitalized error message' 1>&2; \
+ exit 1; } \
+ || :
+
+# Error messages should not end with a period
+sc_error_message_period:
+ @$(VC_LIST_EXCEPT) \
+ | xargs $(GREP) -nEA2 '[^rp]error *\(' /dev/null \
+ | $(GREP) -E '[^."]\."' \
+ && { echo '$(ME): found error message ending in period' 1>&2; \
+ exit 1; } \
+ || :
+
+sc_file_system:
+ @prohibit=file''system \
+ exclude='/proc/filesystems' \
+ ignore_case=1 \
+ halt='found use of "file''system"; spell it "file system"' \
+ $(_sc_search_regexp)
+
+# Don't use cpp tests of this symbol. All code assumes config.h is included.
+sc_prohibit_have_config_h:
+ @prohibit='^# *if.*HAVE''_CONFIG_H' \
+ halt='found use of HAVE''_CONFIG_H; remove' \
+ $(_sc_search_regexp)
+
+# Nearly all .c files must include <config.h>. However, we also permit this
+# via inclusion of a package-specific header, if cfg.mk specified one.
+# config_h_header must be suitable for grep -E.
+config_h_header ?= <config\.h>
+sc_require_config_h:
+ @require='^# *include $(config_h_header)' \
+ in_vc_files='\.c$$' \
+ halt='the above files do not include <config.h>' \
+ $(_sc_search_regexp)
+
+# Print each file name for which the first #include does not match
+# $(config_h_header). Like grep -m 1, this only looks at the first match.
+perl_config_h_first_ = \
+ -e 'BEGIN {$$ret = 0}' \
+ -e 'if (/^\# *include\b/) {' \
+ -e ' if (not m{^\# *include $(config_h_header)}) {' \
+ -e ' print "$$ARGV\n";' \
+ -e ' $$ret = 1;' \
+ -e ' }' \
+ -e ' \# Move on to next file after first include' \
+ -e ' close ARGV;' \
+ -e '}' \
+ -e 'END {exit $$ret}'
+
+# You must include <config.h> before including any other header file.
+# This can possibly be via a package-specific header, if given by cfg.mk.
+sc_require_config_h_first:
+ @if $(VC_LIST_EXCEPT) | $(GREP) '\.c$$' > /dev/null; then \
+ files=$$($(VC_LIST_EXCEPT) | $(GREP) '\.c$$') && \
+ perl -n $(perl_config_h_first_) $$files || \
+ { echo '$(ME): the above files include some other header' \
+ 'before <config.h>' 1>&2; exit 1; } || :; \
+ else :; \
+ fi
+
+sc_prohibit_HAVE_MBRTOWC:
+ @prohibit='\bHAVE_MBRTOWC\b' \
+ halt="do not use $$prohibit; it is always defined" \
+ $(_sc_search_regexp)
+
+# To use this "command" macro, you must first define two shell variables:
+# h: the header name, with no enclosing <> or ""
+# re: a regular expression that matches IFF something provided by $h is used.
+define _sc_header_without_use
+ dummy=; : so we do not need a semicolon before each use; \
+ h_esc=`echo '[<"]'"$$h"'[">]'|$(SED) 's/\./\\\\./g'`; \
+ if $(VC_LIST_EXCEPT) | $(GREP) '\.c$$' > /dev/null; then \
+ files=$$($(GREP) -l '^# *include '"$$h_esc" \
+ $$($(VC_LIST_EXCEPT) | $(GREP) '\.c$$')) && \
+ $(GREP) -LE "$$re" $$files | $(GREP) . && \
+ { echo "$(ME): the above files include $$h but don't use it" \
+ 1>&2; exit 1; } || :; \
+ else :; \
+ fi
+endef
+
+# Prohibit the inclusion of assert.h without an actual use of assert.
+sc_prohibit_assert_without_use:
+ @h='assert.h' re='\<assert *\(' $(_sc_header_without_use)
+
+# Prohibit the inclusion of close-stream.h without an actual use.
+sc_prohibit_close_stream_without_use:
+ @h='close-stream.h' re='\<close_stream *\(' $(_sc_header_without_use)
+
+# Prohibit the inclusion of getopt.h without an actual use.
+sc_prohibit_getopt_without_use:
+ @h='getopt.h' re='\<getopt(_long)? *\(' $(_sc_header_without_use)
+
+# Don't include quotearg.h unless you use one of its functions.
+sc_prohibit_quotearg_without_use:
+ @h='quotearg.h' re='\<quotearg(_[^ ]+)? *\(' $(_sc_header_without_use)
+
+# Don't include quote.h unless you use one of its functions.
+sc_prohibit_quote_without_use:
+ @h='quote.h' re='\<quote((_n)? *\(|_quoting_options\>)' \
+ $(_sc_header_without_use)
+
+# Don't include this header unless you use one of its functions.
+sc_prohibit_long_options_without_use:
+ @h='long-options.h' re='\<parse_(long_options|gnu_standard_options_only) *\(' \
+ $(_sc_header_without_use)
+
+# Don't include this header unless you use one of its functions.
+sc_prohibit_inttostr_without_use:
+ @h='inttostr.h' re='\<(off|[iu]max|uint)tostr *\(' \
+ $(_sc_header_without_use)
+
+# Don't include this header unless you use one of its functions.
+sc_prohibit_ignore_value_without_use:
+ @h='ignore-value.h' re='\<ignore_(value|ptr) *\(' \
+ $(_sc_header_without_use)
+
+# Don't include this header unless you use one of its functions.
+sc_prohibit_error_without_use:
+ @h='error.h' \
+ re='\<error(_at_line|_print_progname|_one_per_line|_message_count)? *\('\
+ $(_sc_header_without_use)
+
+# Don't include xalloc.h unless you use one of its symbols.
+# Consider these symbols:
+# perl -lne '/^# *define (\w+)\(/ and print $1' lib/xalloc.h|grep -v '^__';
+# perl -lne 'm{^(?:_Noreturn )?(?:void|char) \*?(\w+) *\(} and print $1' lib/xalloc.h
+# Divide into two sets on case, and filter each through this:
+# | sort | perl -MRegexp::Assemble -le \
+# 'print Regexp::Assemble->new(file => "/dev/stdin")->as_string'|sed 's/\?://g'
+_xa1 = x(i(m(emdup0?|alloc)|realloc(array)?|([cz]|nm)alloc)|([pz]|c(har)?|2n?re|nm)alloc|realloc(array)?|m(alloc|emdup)|alloc_die|strdup)
+_xa2 = X([CZ]|N?M)ALLOC
+sc_prohibit_xalloc_without_use:
+ @h='xalloc.h' \
+ re='\<($(_xa1)|$(_xa2)) *\('\
+ $(_sc_header_without_use)
+
+# Extract function names:
+# perl -lne '/^(?:extern )?(?:void|char|Hash_table) \*?(\w+) *\(/ and print $1' lib/hash.h
+_hash_re = \
+hash_(re(set_tuning|move)|xin(itialize|sert)|in(itialize|sert)|get_(firs|nex)t|print_statistics|(delet|fre)e|lookup|clear)
+_hash_fn = \<($(_hash_re)) *\(
+_hash_struct = (struct )?\<[Hh]ash_(table|tuning)\>
+sc_prohibit_hash_without_use:
+ @h='hash.h' \
+ re='$(_hash_fn)|$(_hash_struct)'\
+ $(_sc_header_without_use)
+
+sc_prohibit_cloexec_without_use:
+ @h='cloexec.h' re='\<(set_cloexec_flag|dup_cloexec) *\(' \
+ $(_sc_header_without_use)
+
+sc_prohibit_posixver_without_use:
+ @h='posixver.h' re='\<posix2_version *\(' $(_sc_header_without_use)
+
+sc_prohibit_same_without_use:
+ @h='same.h' re='\<same_name(at)? *\(' $(_sc_header_without_use)
+
+sc_prohibit_hash_pjw_without_use:
+ @h='hash-pjw.h' \
+ re='\<hash_pjw\>' \
+ $(_sc_header_without_use)
+
+sc_prohibit_safe_read_without_use:
+ @h='safe-read.h' re='(\<SAFE_READ_ERROR\>|\<safe_read *\()' \
+ $(_sc_header_without_use)
+
+sc_prohibit_argmatch_without_use:
+ @h='argmatch.h' \
+ re='(\<(ARGMATCH_DEFINE_GROUP|ARRAY_CARDINALITY|X?ARGMATCH(|_TO_ARGUMENT|_VERIFY))\>|\<(invalid_arg|argmatch(_exit_fn|_(in)?valid)?) *\()' \
+ $(_sc_header_without_use)
+
+sc_prohibit_canonicalize_without_use:
+ @h='canonicalize.h' \
+ re='CAN_(EXISTING|ALL_BUT_LAST|MISSING)|canonicalize_(mode_t|filename_mode|file_name)' \
+ $(_sc_header_without_use)
+
+sc_prohibit_root_dev_ino_without_use:
+ @h='root-dev-ino.h' \
+ re='(\<ROOT_DEV_INO_(CHECK|WARN)\>|\<get_root_dev_ino *\()' \
+ $(_sc_header_without_use)
+
+sc_prohibit_openat_without_use:
+ @h='openat.h' \
+ re='\<(openat_(permissive|needs_fchdir|(save|restore)_fail)|l?(stat|ch(own|mod))at|(euid)?accessat|(FCHMOD|FCHOWN|STAT)AT_INLINE)\>' \
+ $(_sc_header_without_use)
+
+# Prohibit the inclusion of c-ctype.h without an actual use.
+ctype_re = isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|islower\
+|isprint|ispunct|isspace|isupper|isxdigit|tolower|toupper
+sc_prohibit_c_ctype_without_use:
+ @h='c-ctype.h' re='\<c_($(ctype_re)) *\(' \
+ $(_sc_header_without_use)
+
+# The following list was generated by running:
+# man signal.h|col -b|perl -ne '/bsd_signal.*;/.../sigwaitinfo.*;/ and print' \
+# | perl -lne '/^\s+(?:int|void).*?(\w+).*/ and print $1' | fmt
+_sig_functions = \
+ bsd_signal kill killpg pthread_kill pthread_sigmask raise sigaction \
+ sigaddset sigaltstack sigdelset sigemptyset sigfillset sighold sigignore \
+ siginterrupt sigismember signal sigpause sigpending sigprocmask sigqueue \
+ sigrelse sigset sigsuspend sigtimedwait sigwait sigwaitinfo
+_sig_function_re = $(subst $(_sp),|,$(strip $(_sig_functions)))
+# The following were extracted from "man signal.h" manually.
+_sig_types_and_consts = \
+ MINSIGSTKSZ SA_NOCLDSTOP SA_NOCLDWAIT SA_NODEFER SA_ONSTACK \
+ SA_RESETHAND SA_RESTART SA_SIGINFO SIGEV_NONE SIGEV_SIGNAL \
+ SIGEV_THREAD SIGSTKSZ SIG_BLOCK SIG_SETMASK SIG_UNBLOCK SS_DISABLE \
+ SS_ONSTACK mcontext_t pid_t sig_atomic_t sigevent siginfo_t sigset_t \
+ sigstack sigval stack_t ucontext_t
+# generated via this:
+# perl -lne '/^#ifdef (SIG\w+)/ and print $1' lib/sig2str.c|sort -u|fmt -70
+_sig_names = \
+ SIGABRT SIGALRM SIGALRM1 SIGBUS SIGCANCEL SIGCHLD SIGCLD SIGCONT \
+ SIGDANGER SIGDIL SIGEMT SIGFPE SIGFREEZE SIGGRANT SIGHUP SIGILL \
+ SIGINFO SIGINT SIGIO SIGIOT SIGKAP SIGKILL SIGKILLTHR SIGLOST SIGLWP \
+ SIGMIGRATE SIGMSG SIGPHONE SIGPIPE SIGPOLL SIGPRE SIGPROF SIGPWR \
+ SIGQUIT SIGRETRACT SIGSAK SIGSEGV SIGSOUND SIGSTKFLT SIGSTOP SIGSYS \
+ SIGTERM SIGTHAW SIGTRAP SIGTSTP SIGTTIN SIGTTOU SIGURG SIGUSR1 \
+ SIGUSR2 SIGVIRT SIGVTALRM SIGWAITING SIGWINCH SIGWIND SIGWINDOW \
+ SIGXCPU SIGXFSZ
+_sig_syms_re = $(subst $(_sp),|,$(strip $(_sig_names) $(_sig_types_and_consts)))
+
+# Prohibit the inclusion of signal.h without an actual use.
+sc_prohibit_signal_without_use:
+ @h='signal.h' \
+ re='\<($(_sig_function_re)) *\(|\<($(_sig_syms_re))\>' \
+ $(_sc_header_without_use)
+
+# Don't include stdio--.h unless you use one of its functions.
+sc_prohibit_stdio--_without_use:
+ @h='stdio--.h' re='\<((f(re)?|p)open|tmpfile) *\(' \
+ $(_sc_header_without_use)
+
+# Don't include stdio-safer.h unless you use one of its functions.
+sc_prohibit_stdio-safer_without_use:
+ @h='stdio-safer.h' re='\<((f(re)?|p)open|tmpfile)_safer *\(' \
+ $(_sc_header_without_use)
+
+# Prohibit the inclusion of strings.h without a sensible use.
+# Using the likes of bcmp, bcopy, bzero, index or rindex is not sensible.
+sc_prohibit_strings_without_use:
+ @h='strings.h' \
+ re='\<(strn?casecmp|ffs(ll)?)\>' \
+ $(_sc_header_without_use)
+
+# Extract the raw list of symbol names with this:
+gl_extract_define_simple = \
+ /^\# *define ([A-Z]\w+)\(/ and print $$1
+# Filter out duplicates and convert to a space-separated list:
+_intprops_names = \
+ $(shell f=$(gnulib_dir)/lib/intprops.h; \
+ perl -lne '$(gl_extract_define_simple)' $$f | sort -u | tr '\n' ' ')
+# Remove trailing space and convert to a regular expression:
+_intprops_syms_re = $(subst $(_sp),|,$(strip $(_intprops_names)))
+# Prohibit the inclusion of intprops.h without an actual use.
+sc_prohibit_intprops_without_use:
+ @h='intprops.h' \
+ re='\<($(_intprops_syms_re)) *\(' \
+ $(_sc_header_without_use)
+
+_stddef_syms_re = \
+ NULL|max_align_t|nullptr_t|offsetof|ptrdiff_t|size_t|unreachable|wchar_t
+# Prohibit the inclusion of stddef.h without an actual use.
+sc_prohibit_stddef_without_use:
+ @h='stddef.h' \
+ re='\<($(_stddef_syms_re))\>' \
+ $(_sc_header_without_use)
+
+_de1 = dirfd|(close|(fd)?open|read|rewind|seek|tell)dir(64)?(_r)?
+_de2 = (versionsort|struct dirent|getdirentries|alphasort|scandir(at)?)(64)?
+_de3 = MAXNAMLEN|DIR|ino_t|d_ino|d_fileno|d_namlen
+_dirent_syms_re = $(_de1)|$(_de2)|$(_de3)
+# Prohibit the inclusion of dirent.h without an actual use.
+sc_prohibit_dirent_without_use:
+ @h='dirent.h' \
+ re='\<($(_dirent_syms_re))\>' \
+ $(_sc_header_without_use)
+
+# Prohibit the inclusion of verify.h without an actual use.
+sc_prohibit_verify_without_use:
+ @h='verify.h' \
+ re='\<(verify(_expr)?|assume) *\(' \
+ $(_sc_header_without_use)
+
+# Don't include xfreopen.h unless you use one of its functions.
+sc_prohibit_xfreopen_without_use:
+ @h='xfreopen.h' re='\<xfreopen *\(' $(_sc_header_without_use)
+
+sc_obsolete_symbols:
+ @prohibit='\<(HAVE''_FCNTL_H|O''_NDELAY)\>' \
+ halt='do not use HAVE''_FCNTL_H or O'_NDELAY \
+ $(_sc_search_regexp)
+
+# FIXME: warn about definitions of EXIT_FAILURE, EXIT_SUCCESS, STREQ
+
+# Each nonempty ChangeLog line must start with a year number, or a TAB.
+sc_changelog:
+ @prohibit='^[^12 ]' \
+ in_vc_files='^ChangeLog$$' \
+ halt='found unexpected prefix in a ChangeLog' \
+ $(_sc_search_regexp)
+
+# Ensure that each .c file containing a "main" function also
+# calls bindtextdomain.
+sc_bindtextdomain:
+ @require='bindtextdomain *\(' \
+ in_vc_files='\.c$$' \
+ containing='\<main *(' \
+ halt='the above files do not call bindtextdomain' \
+ $(_sc_search_regexp)
+
+# Require that the final line of each test-lib.sh-using test be this one:
+# Exit $fail
+# Note: this test requires GNU grep's --label= option.
+Exit_witness_file ?= tests/test-lib.sh
+Exit_base := $(notdir $(Exit_witness_file))
+sc_require_test_exit_idiom:
+ @if test -f $(srcdir)/$(Exit_witness_file); then \
+ die=0; \
+ for i in $$($(GREP) -l -F 'srcdir/$(Exit_base)' \
+ $$($(VC_LIST) tests)); do \
+ tail -n1 $$i | $(GREP) '^Exit .' > /dev/null \
+ && : || { die=1; echo $$i; } \
+ done; \
+ test $$die = 1 && \
+ { echo 1>&2 '$(ME): the final line in each of the above is not:'; \
+ echo 1>&2 'Exit something'; \
+ exit 1; } || :; \
+ fi
+
+sc_trailing_blank:
+ @prohibit='[ ]$$' \
+ halt='found trailing blank(s)' \
+ exclude='^Binary file .* matches$$' \
+ $(_sc_search_regexp)
+
+# Match lines like the following, but where there is only one space
+# between the options and the description:
+# -D, --all-repeated[=delimit-method] print all duplicate lines\n
+longopt_re = --[a-z][0-9A-Za-z-]*(\[?=[0-9A-Za-z-]*]?)?
+sc_two_space_separator_in_usage:
+ @prohibit='^ *(-[A-Za-z],)? $(longopt_re) [^ ].*\\$$' \
+ halt='help2man requires at least two spaces between an option and its description'\
+ $(_sc_search_regexp)
+
+# A regexp matching function names like "error" that may be used
+# to emit translatable messages.
+_gl_translatable_diag_func_re ?= error
+
+# Look for diagnostics that aren't marked for translation.
+# This won't find any for which error's format string is on a separate line.
+sc_unmarked_diagnostics:
+ @prohibit='\<$(_gl_translatable_diag_func_re) *\([^"]*"[^"]*[a-z]{3}' \
+ exclude='(_|ngettext ?)\(' \
+ halt='found unmarked diagnostic(s)' \
+ $(_sc_search_regexp)
+
+# Avoid useless parentheses like those in this example:
+# #if defined (SYMBOL) || defined (SYM2)
+sc_useless_cpp_parens:
+ @prohibit='^# *if .*defined *\(' \
+ halt='found useless parentheses in cpp directive' \
+ $(_sc_search_regexp)
+
+# List headers for which HAVE_HEADER_H is always true, assuming you are
+# using the appropriate gnulib module. CAUTION: for each "unnecessary"
+# #if HAVE_HEADER_H that you remove, be sure that your project explicitly
+# requires the gnulib module that guarantees the usability of that header.
+gl_assured_headers_ = \
+ cd $(gnulib_dir)/lib && echo *.in.h|$(SED) 's/\.in\.h//g'
+
+# Convert the list of names to upper case, and replace each space with "|".
+az_ = abcdefghijklmnopqrstuvwxyz
+AZ_ = ABCDEFGHIJKLMNOPQRSTUVWXYZ
+gl_header_upper_case_or_ = \
+ $$($(gl_assured_headers_) \
+ | tr $(az_)/.- $(AZ_)___ \
+ | tr -s ' ' '|' \
+ )
+sc_prohibit_always_true_header_tests:
+ @or=$(gl_header_upper_case_or_); \
+ re="HAVE_($$or)_H"; \
+ prohibit='\<'"$$re"'\>' \
+ halt=$$(printf '%s\n' \
+ 'do not test the above HAVE_<header>_H symbol(s);' \
+ ' with the corresponding gnulib module, they are always true') \
+ $(_sc_search_regexp)
+
+sc_prohibit_defined_have_decl_tests:
+ @prohibit='(#[ ]*ifn?def|\<defined)\>[ (]+HAVE_DECL_' \
+ halt='HAVE_DECL macros are always defined' \
+ $(_sc_search_regexp)
+
+# ==================================================================
+gl_other_headers_ ?= \
+ intprops.h \
+ openat.h \
+ stat-macros.h
+
+# Perl -lne code to extract "significant" cpp-defined symbols from a
+# gnulib header file, eliminating a few common false-positives.
+# The exempted names below are defined only conditionally in gnulib,
+# and hence sometimes must/may be defined in application code.
+gl_extract_significant_defines_ = \
+ /^\# *define ([^_ (][^ (]*)(\s*\(|\s+\w+)/\
+ && $$2 !~ /(?:rpl_|_used_without_)/\
+ && $$1 !~ /^(?:NSIG|ENODATA)$$/\
+ && $$1 !~ /^(?:SA_RESETHAND|SA_RESTART)$$/\
+ and print $$1
+
+# Create a list of regular expressions matching the names
+# of macros that are guaranteed to be defined by parts of gnulib.
+define def_sym_regex
+ gen_h=$(gl_generated_headers_); \
+ (cd $(gnulib_dir)/lib; \
+ for f in *.in.h $(gl_other_headers_); do \
+ test -f $$f \
+ && perl -lne '$(gl_extract_significant_defines_)' $$f; \
+ done; \
+ ) | sort -u \
+ | $(SED) 's/^/^ *# *(define|undef) */;s/$$/\\>/'
+endef
+
+# Don't define macros that we already get from gnulib header files.
+sc_prohibit_always-defined_macros:
+ @if test -d $(gnulib_dir); then \
+ case $$(echo all: | $(GREP) -l -f - Makefile) in Makefile);; *) \
+ echo '$(ME): skipping $@: you lack GNU grep' 1>&2; exit 0;; \
+ esac; \
+ regex=$$($(def_sym_regex)); export regex; \
+ $(VC_LIST_EXCEPT) \
+ | xargs sh -c 'echo $$regex | $(GREP) -E -f - "$$@"' \
+ dummy /dev/null \
+ && { printf '$(ME): define the above' \
+ ' via some gnulib .h file\n' 1>&2; \
+ exit 1; } \
+ || :; \
+ fi
+# ==================================================================
+
+# Prohibit checked in backup files.
+sc_prohibit_backup_files:
+ @$(VC_LIST) | $(GREP) '~$$' && \
+ { echo '$(ME): found version controlled backup file' 1>&2; \
+ exit 1; } || :
+
+# Require the latest GPL.
+sc_GPL_version:
+ @prohibit='either ''version [^3]' \
+ halt='GPL vN, N!=3' \
+ $(_sc_search_regexp)
+
+# Require the latest GFDL. Two regexp, since some .texi files end up
+# line wrapping between 'Free Documentation License,' and 'Version'.
+_GFDL_regexp = (Free ''Documentation.*Version 1\.[^3]|Version 1\.[^3] or any)
+sc_GFDL_version:
+ @prohibit='$(_GFDL_regexp)' \
+ halt='GFDL vN, N!=3' \
+ $(_sc_search_regexp)
+
+# Don't use Texinfo's @acronym{}.
+# https://lists.gnu.org/r/bug-gnulib/2010-03/msg00321.html
+texinfo_suffix_re_ ?= \.(txi|texi(nfo)?)$$
+sc_texinfo_acronym:
+ @prohibit='@acronym\{' \
+ in_vc_files='$(texinfo_suffix_re_)' \
+ halt='found use of Texinfo @acronym{}' \
+ $(_sc_search_regexp)
+
+cvs_keywords = \
+ Author|Date|Header|Id|Name|Locker|Log|RCSfile|Revision|Source|State
+
+sc_prohibit_cvs_keyword:
+ @prohibit='\$$($(cvs_keywords))\$$' \
+ halt='do not use CVS keyword expansion' \
+ $(_sc_search_regexp)
+
+# This Perl code is slightly obfuscated. Not only is each "$" doubled
+# because it's in a Makefile, but the $$c's are comments; we cannot
+# use "#" due to the way the script ends up concatenated onto one line.
+# It would be much more concise, and would produce better output (including
+# counts) if written as:
+# perl -ln -0777 -e '/\n(\n+)$/ and print "$ARGV: ".length $1' ...
+# but that would be far less efficient, reading the entire contents
+# of each file, rather than just the last two bytes of each.
+# In addition, while the code below detects both blank lines and a missing
+# newline at EOF, the above detects only the former.
+#
+# This is a perl script that is expected to be the single-quoted argument
+# to a command-line "-le". The remaining arguments are file names.
+# Print the name of each file that does not end in exactly one newline byte.
+# I.e., warn if there are blank lines (2 or more newlines), or if the
+# last byte is not a newline. However, currently we don't complain
+# about any file that contains exactly one byte.
+# Exit nonzero if at least one such file is found, otherwise, exit 0.
+# Warn about, but otherwise ignore open failure. Ignore seek/read failure.
+#
+# Use this if you want to remove trailing empty lines from selected files:
+# perl -pi -0777 -e 's/\n\n+$/\n/' files...
+#
+require_exactly_one_NL_at_EOF_ = \
+ foreach my $$f (@ARGV) \
+ { \
+ open F, "<", $$f or (warn "failed to open $$f: $$!\n"), next; \
+ my $$p = sysseek (F, -2, 2); \
+ my $$c = "seek failure probably means file has < 2 bytes; ignore"; \
+ my $$last_two_bytes; \
+ defined $$p and $$p = sysread F, $$last_two_bytes, 2; \
+ close F; \
+ $$c = "ignore read failure"; \
+ $$p && ($$last_two_bytes eq "\n\n" \
+ || substr ($$last_two_bytes,1) ne "\n") \
+ and (print $$f), $$fail=1; \
+ } \
+ END { exit defined $$fail }
+sc_prohibit_empty_lines_at_EOF:
+ @$(VC_LIST_EXCEPT) \
+ | xargs perl -le '$(require_exactly_one_NL_at_EOF_)' \
+ || { echo '$(ME): empty line(s) or no newline at EOF' 1>&2; \
+ exit 1; } \
+ || :
+
+# Make sure we don't use st_blocks. Use ST_NBLOCKS instead.
+# This is a bit of a kludge, since it prevents use of the string
+# even in comments, but for now it does the job with no false positives.
+sc_prohibit_stat_st_blocks:
+ @prohibit='[.>]st_blocks' \
+ halt='do not use st_blocks; use ST_NBLOCKS' \
+ $(_sc_search_regexp)
+
+# Make sure we don't define any S_IS* macros in src/*.c files.
+# They're already defined via gnulib's sys/stat.h replacement.
+sc_prohibit_S_IS_definition:
+ @prohibit='^ *# *define *S_IS' \
+ halt='do not define S_IS* macros; include <sys/stat.h>' \
+ $(_sc_search_regexp)
+
+# Perl block to convert a match to FILE_NAME:LINENO:TEST,
+# that is shared by two definitions below.
+perl_filename_lineno_text_ = \
+ -e ' {' \
+ -e ' $$n = ($$` =~ tr/\n/\n/ + 1);' \
+ -e ' ($$v = $$&) =~ s/\n/\\n/g;' \
+ -e ' print "$$ARGV:$$n:$$v\n";' \
+ -e ' }'
+
+prohibit_doubled_words_ = \
+ the then in an on if is it but for or at and do to can
+# expand the regex before running the check to avoid using expensive captures
+prohibit_doubled_word_expanded_ = \
+ $(join $(prohibit_doubled_words_),$(addprefix \s+,$(prohibit_doubled_words_)))
+prohibit_doubled_word_RE_ ?= \
+ /\b(?:$(subst $(_sp),|,$(prohibit_doubled_word_expanded_)))\b/gims
+prohibit_doubled_word_ = \
+ -e 'while ($(prohibit_doubled_word_RE_))' \
+ $(perl_filename_lineno_text_)
+
+# Define this to a regular expression that matches
+# any filename:dd:match lines you want to ignore.
+# The default is to ignore no matches.
+ignore_doubled_word_match_RE_ ?= ^$$
+
+sc_prohibit_doubled_word:
+ @$(VC_LIST_EXCEPT) \
+ | xargs perl -n -0777 $(prohibit_doubled_word_) \
+ | $(GREP) -vE '$(ignore_doubled_word_match_RE_)' \
+ | $(GREP) . \
+ && { echo '$(ME): doubled words' 1>&2; exit 1; } \
+ || :
+
+# A regular expression matching undesirable combinations of words like
+# "can\ not"; this matches them even when the two words appear on different
+# lines, but not when there is an intervening delimiter like "#" or "*".
+# Similarly undesirable, "See\ @xref{...}", since an @xref should start
+# a sentence. Explicitly prohibit any prefix of "see" or "also".
+# Also prohibit a prefix matching "\w+ +".
+# @pxref gets the same see/also treatment and should be parenthesized;
+# presume it must *not* start a sentence.
+# POSIX spells it "timestamp" rather than "time\s+stamp", so we do, too.
+bad_xref_re_ ?= (?:[\w,:;] +|(?:see|also)\s+)\@xref\{
+bad_pxref_re_ ?= (?:[.!?]|(?:see|also))\s+\@pxref\{
+prohibit_undesirable_word_seq_RE_ ?= \
+ /(?:\bcan\s+not\b|\btime\s+stamps?\b|$(bad_xref_re_)|$(bad_pxref_re_))/gims
+prohibit_undesirable_word_seq_ = \
+ -e 'while ($(prohibit_undesirable_word_seq_RE_))' \
+ $(perl_filename_lineno_text_)
+# Define this to a regular expression that matches
+# any filename:dd:match lines you want to ignore.
+# The default is to ignore no matches.
+ignore_undesirable_word_sequence_RE_ ?= ^$$
+
+sc_prohibit_undesirable_word_seq:
+ @$(VC_LIST_EXCEPT) \
+ | xargs perl -n -0777 $(prohibit_undesirable_word_seq_) \
+ | $(GREP) -vE '$(ignore_undesirable_word_sequence_RE_)' \
+ | $(GREP) . \
+ && { echo '$(ME): undesirable word sequence' >&2; exit 1; } \
+ || :
+
+# Except for shell files and for loops, double semicolon is probably a mistake
+sc_prohibit_double_semicolon:
+ @prohibit='; *;[ {} \]*(/[/*]|$$)' \
+ in_vc_files='\.[chly]$$' \
+ exclude='\bfor *\(.*\)' \
+ halt="Double semicolon detected" \
+ $(_sc_search_regexp)
+
+_ptm1 = use "test C1 && test C2", not "test C1 -''a C2"
+_ptm2 = use "test C1 || test C2", not "test C1 -''o C2"
+# Using test's -a and -o operators is not portable.
+# We prefer test over [, since the latter is spelled [[ in configure.ac.
+sc_prohibit_test_minus_ao:
+ @prohibit='(\<test| \[+) .+ -[ao] ' \
+ halt='$(_ptm1); $(_ptm2)' \
+ $(_sc_search_regexp)
+
+# Avoid a test bashism.
+sc_prohibit_test_double_equal:
+ @prohibit='(\<test| \[+) .+ == ' \
+ containing='#! */bin/[a-z]*sh' \
+ halt='use "test x = x", not "test x =''= x"' \
+ $(_sc_search_regexp)
+
+# Each program that uses proper_name_utf8 must link with one of the
+# ICONV libraries. Otherwise, some ICONV library must appear in LDADD.
+# The perl -0777 invocation below extracts the possibly-multi-line
+# definition of LDADD from the appropriate Makefile.am and exits 0
+# when it contains "ICONV".
+sc_proper_name_utf8_requires_ICONV:
+ @progs=$$($(VC_LIST_EXCEPT) \
+ | xargs $(GREP) -l 'proper_name_utf8 ''("'); \
+ if test "x$$progs" != x; then \
+ fail=0; \
+ for p in $$progs; do \
+ dir=$$(dirname "$$p"); \
+ perl -0777 \
+ -ne 'exit !(/^LDADD =(.+?[^\\]\n)/ms && $$1 =~ /ICONV/)' \
+ $$dir/Makefile.am && continue; \
+ base=$$(basename "$$p" .c); \
+ $(GREP) "$${base}_LDADD.*ICONV)" $$dir/Makefile.am > /dev/null \
+ || { fail=1; echo 1>&2 "$(ME): $$p uses proper_name_utf8"; }; \
+ done; \
+ test $$fail = 1 && \
+ { echo 1>&2 '$(ME): the above do not link with any ICONV library'; \
+ exit 1; } || :; \
+ fi
+
+# Warn about "c0nst struct Foo const foo[]",
+# but not about "char const *const foo" or "#define const const".
+sc_redundant_const:
+ @prohibit='\bconst\b[[:space:][:alnum:]]{2,}\bconst\b' \
+ halt='redundant "const" in declarations' \
+ $(_sc_search_regexp)
+
+sc_const_long_option:
+ @prohibit='^ *static.*struct option ' \
+ exclude='const struct option|struct option const' \
+ halt='add "const" to the above declarations' \
+ $(_sc_search_regexp)
+
+NEWS_hash = \
+ $$($(SED) -n '/^\*.* $(PREV_VERSION_REGEXP) ([0-9-]*)/,$$p' \
+ $(srcdir)/NEWS \
+ | perl -0777 -pe \
+ 's/^Copyright.+?Free\sSoftware\sFoundation,\sInc\.\n//ms' \
+ | md5sum - \
+ | $(SED) 's/ .*//')
+
+# Ensure that we don't accidentally insert an entry into an old NEWS block.
+sc_immutable_NEWS:
+ @if test -f $(srcdir)/NEWS; then \
+ test "$(NEWS_hash)" = '$(old_NEWS_hash)' && : || \
+ { echo '$(ME): you have modified old NEWS' 1>&2; exit 1; }; \
+ fi
+
+# Update the hash stored above. Do this after each release and
+# for any corrections to old entries.
+update-NEWS-hash: NEWS
+ perl -pi -e 's/^(old_NEWS_hash[ \t]+:?=[ \t]+).*/$${1}'"$(NEWS_hash)/" \
+ $(srcdir)/cfg.mk
+
+# Ensure that we use only the standard $(VAR) notation,
+# not @...@ in Makefile.am, now that we can rely on automake
+# to emit a definition for each substituted variable.
+# However, there is still one case in which @VAR\@ use is not just
+# legitimate, but actually required: when augmenting an automake-defined
+# variable with a prefix. For example, gettext uses this:
+# MAKEINFO = env LANG= LC_MESSAGES= LC_ALL= LANGUAGE= @MAKEINFO@
+# otherwise, makeinfo would put German or French (current locale)
+# navigation hints in the otherwise-English documentation.
+#
+# Allow the package to add exceptions via a hook in cfg.mk;
+# for example, @PRAGMA_SYSTEM_HEADER\@ can be permitted by
+# setting this to ' && !/PRAGMA_SYSTEM_HEADER/'.
+_makefile_at_at_check_exceptions ?=
+sc_makefile_at_at_check:
+ @perl -ne '/\@\w+\@/' \
+ -e ' && !/(\w+)\s+=.*\@\1\@$$/' \
+ -e ''$(_makefile_at_at_check_exceptions) \
+ -e 'and (print "$$ARGV:$$.: $$_"), $$m=1; END {exit !$$m}' \
+ $$($(VC_LIST_EXCEPT) | $(GREP) -E '(^|/)(Makefile\.am|[^/]+\.mk)$$') \
+ && { echo '$(ME): use $$(...), not @...@' 1>&2; exit 1; } || :
+
+news-check: NEWS
+ $(AM_V_GEN)if $(SED) -n $(news-check-lines-spec)p $< \
+ | $(GREP) -E $(news-check-regexp) >/dev/null; then \
+ :; \
+ else \
+ echo 'NEWS: $$(news-check-regexp) failed to match' 1>&2; \
+ exit 1; \
+ fi
+
+sc_makefile_TAB_only_indentation:
+ @prohibit='^ [ ]{8}' \
+ in_vc_files='akefile|\.mk$$' \
+ halt='found TAB-8-space indentation' \
+ $(_sc_search_regexp)
+
+sc_m4_quote_check:
+ @prohibit='(AC_DEFINE(_UNQUOTED)?|AC_DEFUN)\([^[]' \
+ in_vc_files='(^configure\.ac|\.m4)$$' \
+ halt='quote the first arg to AC_DEF*' \
+ $(_sc_search_regexp)
+
+fix_po_file_diag = \
+'you have changed the set of files with translatable diagnostics;\n\
+apply the above patch\n'
+
+# Generate a list of files in which to search for translatable strings.
+perl_translatable_files_list_ = \
+ -e 'foreach $$file (@ARGV) {' \
+ -e ' \# Consider only file extensions with one or two letters' \
+ -e ' $$file =~ /\...?$$/ or next;' \
+ -e ' \# Ignore m4 and mk files' \
+ -e ' $$file =~ /\.m[4k]$$/ and next;' \
+ -e ' \# Ignore a .c or .h file with a corresponding .l or .y file' \
+ -e ' $$file =~ /(.+)\.[ch]$$/ && (-e "$${1}.l" || -e "$${1}.y")' \
+ -e ' and next;' \
+ -e ' \# Skip unreadable files' \
+ -e ' -r $$file or next;' \
+ -e ' print "$$file ";' \
+ -e '}'
+
+# Verify that all source files using _() (more specifically, files that
+# match $(_gl_translatable_string_re)) are listed in po/POTFILES.in.
+po_file ?= $(srcdir)/po/POTFILES.in
+generated_files ?= $(srcdir)/lib/*.[ch]
+_gl_translatable_string_re ?= \b(N?_|gettext *)\([^)"]*("|$$)
+sc_po_check:
+ @if test -f $(po_file); then \
+ $(GREP) -E -v '^(#|$$)' $(po_file) \
+ | $(GREP) -v '^src/false\.c$$' | sort > $@-1; \
+ { $(VC_LIST_EXCEPT); echo $(generated_files); } \
+ | xargs perl $(perl_translatable_files_list_) \
+ | xargs $(GREP) -E -l '$(_gl_translatable_string_re)' \
+ | $(SED) 's|^$(_dot_escaped_srcdir)/||' \
+ | sort -u > $@-2; \
+ diff -u -L $(po_file) -L $(po_file) $@-1 $@-2 \
+ || { printf '$(ME): '$(fix_po_file_diag) 1>&2; exit 1; }; \
+ rm -f $@-1 $@-2; \
+ fi
+
+# Sometimes it is useful to change the PATH environment variable
+# in Makefiles. When doing so, it's better not to use the Unix-centric
+# path separator of ':', but rather the automake-provided '$(PATH_SEPARATOR)'.
+msg = 'Do not use ":" above; use $$(PATH_SEPARATOR) instead'
+sc_makefile_path_separator_check:
+ @prohibit='PATH[=].*:' \
+ in_vc_files='akefile|\.mk$$' \
+ halt=$(msg) \
+ $(_sc_search_regexp)
+
+sc_makefile_DISTCHECK_CONFIGURE_FLAGS:
+ @prohibit='^DISTCHECK_CONFIGURE_FLAGS' \
+ in_vc_files='akefile|\.mk$$' \
+ halt="use AM_DISTCHECK_CONFIGURE_FLAGS" \
+ $(_sc_search_regexp)
+
+# Check that 'make alpha' will not fail at the end of the process,
+# i.e., when pkg-M.N.tar.xz already exists (either in "." or in ../release)
+# and is read-only.
+writable-files:
+ $(AM_V_GEN)if test -d $(release_archive_dir); then \
+ for file in $(DIST_ARCHIVES); do \
+ for p in ./ $(release_archive_dir)/; do \
+ test -e $$p$$file || continue; \
+ test -w $$p$$file \
+ || { echo ERROR: $$p$$file is not writable; fail=1; }; \
+ done; \
+ done; \
+ test "$$fail" && exit 1 || : ; \
+ else :; \
+ fi
+
+v_etc_file = $(gnulib_dir)/lib/version-etc.c
+sample-test = tests/sample-test
+texi = doc/$(PACKAGE).texi
+# Make sure that the copyright date in $(v_etc_file) is up to date.
+# Do the same for the $(sample-test) and the main doc/.texi file.
+sc_copyright_check:
+ @require='enum { COPYRIGHT_YEAR = '$$(date +%Y)' };' \
+ in_files=$(v_etc_file) \
+ halt='out of date copyright in $(v_etc_file); update it' \
+ $(_sc_search_regexp)
+ @require='# Copyright \(C\) '$$(date +%Y)' Free' \
+ in_vc_files=$(sample-test) \
+ halt='out of date copyright in $(sample-test); update it' \
+ $(_sc_search_regexp)
+ @require='Copyright @copyright\{} .*'$$(date +%Y) \
+ in_vc_files=$(texi) \
+ halt='out of date copyright in $(texi); update it' \
+ $(_sc_search_regexp)
+
+# If tests/help-version exists and seems to be new enough, assume that its
+# use of init.sh and path_prepend_ is correct, and ensure that every other
+# use of init.sh is identical.
+# This is useful because help-version cross-checks prog --version
+# with $(VERSION), which verifies that its path_prepend_ invocation
+# sets PATH correctly. This is an inexpensive way to ensure that
+# the other init.sh-using tests also get it right.
+_hv_file ?= $(srcdir)/tests/help-version
+_hv_regex_weak ?= ^ *\. .*/init\.sh"
+# Fix syntax-highlighters "
+_hv_regex_strong ?= ^ *\. "\$${srcdir=\.}/init\.sh"
+sc_cross_check_PATH_usage_in_tests:
+ @if test -f $(_hv_file); then \
+ $(GREP) -l 'VERSION mismatch' $(_hv_file) >/dev/null \
+ || { echo "$@: skipped: no such file: $(_hv_file)" 1>&2; \
+ exit 0; }; \
+ $(GREP) -lE '$(_hv_regex_strong)' $(_hv_file) >/dev/null \
+ || { echo "$@: $(_hv_file) lacks conforming use of init.sh" 1>&2; \
+ exit 1; }; \
+ good=$$($(GREP) -E '$(_hv_regex_strong)' $(_hv_file)); \
+ $(VC_LIST_EXCEPT) \
+ | xargs $(GREP) -lE '$(_hv_regex_weak)' \
+ | xargs $(GREP) -LFx "$$good" \
+ | $(GREP) . \
+ && { printf "$(ME): the above files use" \
+ " path_prepend_ inconsistently\n" 1>&2; \
+ exit 1; } \
+ || :; \
+ fi
+
+# BRE regex of file contents to identify a test script.
+_test_script_regex ?= \<init\.sh\>
+
+# In tests, use "compare expected actual", not the reverse.
+sc_prohibit_reversed_compare_failure:
+ @prohibit='\<compare [^ ]+ ([^ ]*exp|/dev/null)' \
+ containing='$(_test_script_regex)' \
+ halt='reversed compare arguments' \
+ $(_sc_search_regexp)
+
+# #if HAVE_... will evaluate to false for any non numeric string.
+# That would be flagged by using -Wundef, however gnulib currently
+# tests many undefined macros, and so we can't enable that option.
+# So at least preclude common boolean strings as macro values.
+sc_Wundef_boolean:
+ @prohibit='^#define.*(yes|no|true|false)$$' \
+ in_files='$(CONFIG_INCLUDE)' \
+ halt='Use 0 or 1 for macro values' \
+ $(_sc_search_regexp)
+
+# Even if you use pathmax.h to guarantee that PATH_MAX is defined, it might
+# not be constant, or might overflow a stack. In general, use PATH_MAX as
+# a limit, not an array or alloca size.
+sc_prohibit_path_max_allocation:
+ @prohibit='(\balloca *\([^)]*|\[[^]]*)\bPATH_MAX' \
+ halt='Avoid stack allocations of size PATH_MAX' \
+ $(_sc_search_regexp)
+
+sc_vulnerable_makefile_CVE-2009-4029:
+ @prohibit='perm -777 -exec chmod a\+rwx|chmod 777 \$$\(distdir\)' \
+ in_files='(^|/)Makefile\.in$$' \
+ halt=$$(printf '%s\n' \
+ 'the above files are vulnerable; beware of running' \
+ ' "make dist*" rules, and upgrade to fixed automake' \
+ ' see https://bugzilla.redhat.com/show_bug.cgi?id=542609 for details') \
+ $(_sc_search_regexp)
+
+sc_vulnerable_makefile_CVE-2012-3386:
+ @prohibit='chmod a\+w \$$\(distdir\)' \
+ in_files='(^|/)Makefile\.in$$' \
+ halt=$$(printf '%s\n' \
+ 'the above files are vulnerable; beware of running' \
+ ' "make distcheck", and upgrade to fixed automake' \
+ ' see https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2012-3386 for details') \
+ $(_sc_search_regexp)
+
+sc_unportable_grep_q:
+ @prohibit='grep ''-q' halt="unportable 'grep \-q', use >/dev/null instead" \
+ $(_sc_search_regexp)
+
+# The GNU Coding standards say that README should refer to both
+# INSTALL and the file that contains the copying conditions. This
+# shall be COPYING for GPL and COPYING.LESSER for LGPL.
+
+sc_readme_link_install:
+ @require='INSTALL' \
+ in_vc_files='$(top_srcdir)/README$$' \
+ halt='The README file should refer to INSTALL' \
+ $(_sc_search_regexp)
+
+sc_readme_link_copying:
+ @require='COPYING' \
+ in_vc_files='$(top_srcdir)/README$$' \
+ halt='The README file should refer to COPYING[.LESSER]' \
+ $(_sc_search_regexp)
+
+vc-diff-check:
+ $(AM_V_GEN)(unset CDPATH; cd $(srcdir) && $(VC) diff) > vc-diffs || :
+ $(AM_V_at)if test -s vc-diffs; then \
+ cat vc-diffs; \
+ echo "Some files are locally modified:" 1>&2; \
+ exit 1; \
+ else \
+ rm vc-diffs; \
+ fi
+
+rel-files = $(DIST_ARCHIVES)
+
+gnulib-version = $$(cd $(gnulib_dir) \
+ && { git describe || git rev-parse --short=10 HEAD; } )
+bootstrap-tools ?= autoconf,automake,gnulib
+
+gpgv = $$(gpgv2 --version >/dev/null && echo gpgv2 || echo gpgv)
+# If it's not already specified, derive the GPG key ID from
+# the signed tag we've just applied to mark this release.
+gpg_key_ID ?= \
+ $$(cd $(srcdir) \
+ && git cat-file tag v$(VERSION) \
+ | $(gpgv) --status-fd 1 --keyring /dev/null - - 2>/dev/null \
+ | $(AWK) '/^\[GNUPG:] ERRSIG / {print $$3; exit}')
+gpg_key_email ?= \
+ $$(gpg --list-key --with-colons $(gpg_key_ID) 2>/dev/null \
+ | $(AWK) -F: '/^uid/ {print $$10; exit}' \
+ | $(SED) -n 's/.*<\(.*\)>/\1/p')
+gpg_keyring_url ?= https://savannah.gnu.org/project/release-gpgkeys.php?group=$(PACKAGE)&download=1
+
+translation_project_ ?= coordinator@translationproject.org
+
+# Make info-gnu the default only for a stable release.
+announcement_Cc_stable = $(translation_project_), $(PACKAGE_BUGREPORT)
+announcement_mail_headers_stable = \
+ To: info-gnu@gnu.org \
+ Cc: $(announcement_Cc_) \
+ Mail-Followup-To: $(PACKAGE_BUGREPORT)
+
+announcement_Cc_alpha = $(translation_project_)
+announcement_mail_headers_alpha = \
+ To: $(PACKAGE_BUGREPORT) \
+ Cc: $(announcement_Cc_)
+
+announcement_mail_Cc_beta = $(announcement_mail_Cc_alpha)
+announcement_mail_headers_beta = $(announcement_mail_headers_alpha)
+
+announcement_Cc_ ?= $(announcement_Cc_$(release-type))
+announcement_mail_headers_ ?= $(announcement_mail_headers_$(release-type))
+announcement: NEWS ChangeLog $(rel-files)
+# Not $(AM_V_GEN) since the output of this command serves as
+# announcement message: it would start with " GEN announcement".
+ $(AM_V_at)$(srcdir)/$(_build-aux)/announce-gen \
+ --mail-headers='$(announcement_mail_headers_)' \
+ --release-type=$(release-type) \
+ --package=$(PACKAGE) \
+ --prev=$(PREV_VERSION) \
+ --curr=$(VERSION) \
+ --gpg-key-id=$(gpg_key_ID) \
+ $$(test -n "$(gpg_key_email)" && \
+ echo --gpg-key-email="$(gpg_key_email)") \
+ $$(test -n "$(gpg_keyring_url)" && \
+ echo --gpg-keyring-url="$(gpg_keyring_url)") \
+ --srcdir=$(srcdir) \
+ --news=$(srcdir)/NEWS \
+ --bootstrap-tools=$(bootstrap-tools) \
+ $$(case ,$(bootstrap-tools), in (*,gnulib,*) \
+ echo --gnulib-version=$(gnulib-version);; esac) \
+ $(addprefix --url-dir=, $(url_dir_list))
+
+.PHONY: release-commit
+release-commit:
+ $(AM_V_GEN)cd $(srcdir) \
+ && $(_build-aux)/do-release-commit-and-tag \
+ -C $(abs_builddir) $(RELEASE)
+
+## ---------------- ##
+## Updating files. ##
+## ---------------- ##
+
+ftp-gnu = https://ftp.gnu.org/gnu
+www-gnu = https://www.gnu.org
+
+upload_dest_dir_ ?= $(PACKAGE)
+upload_command = \
+ $(srcdir)/$(_build-aux)/gnupload $(GNUPLOADFLAGS) \
+ --to $(gnu_rel_host):$(upload_dest_dir_) \
+ $(rel-files)
+emit_upload_commands:
+ @echo =====================================
+ @echo =====================================
+ @echo '$(upload_command)'
+ @echo '# send the ~/announce-$(my_distdir) e-mail'
+ @echo =====================================
+ @echo =====================================
+
+.PHONY: upload
+upload:
+ $(AM_V_GEN)$(upload_command)
+
+define emit-commit-log
+ printf '%s\n' 'maint: post-release administrivia' '' \
+ '* NEWS: Add header line for next release.' \
+ '* .prev-version: Record previous version.' \
+ '* cfg.mk (old_NEWS_hash): Auto-update.'
+endef
+
+.PHONY: no-submodule-changes
+no-submodule-changes:
+ $(AM_V_GEN)if test -d $(srcdir)/.git \
+ && git --version >/dev/null 2>&1; then \
+ diff=$$(cd $(srcdir) && git submodule -q foreach \
+ git diff-index --name-only HEAD) \
+ || exit 1; \
+ case $$diff in '') ;; \
+ *) echo '$(ME): submodule files are locally modified:'; \
+ echo "$$diff"; exit 1;; esac; \
+ else \
+ : ; \
+ fi
+
+submodule-checks ?= no-submodule-changes public-submodule-commit
+
+# Ensure that each sub-module commit we're using is public.
+# Without this, it is too easy to tag and release code that
+# cannot be built from a fresh clone.
+.PHONY: public-submodule-commit
+public-submodule-commit:
+ $(AM_V_GEN)if test -d $(srcdir)/.git \
+ && git --version >/dev/null 2>&1; then \
+ cd $(srcdir) && \
+ git submodule --quiet foreach \
+ 'test "$$(git rev-parse "$$sha1")" \
+ = "$$(git merge-base origin "$$sha1")"' \
+ || { echo '$(ME): found non-public submodule commit' >&2; \
+ exit 1; }; \
+ else \
+ : ; \
+ fi
+# This rule has a high enough utility/cost ratio that it should be a
+# dependent of "check" by default. However, some of us do occasionally
+# commit a temporary change that deliberately points to a non-public
+# submodule commit, and want to be able to use rules like "make check".
+# In that case, run e.g., "make check gl_public_submodule_commit="
+# to disable this test.
+gl_public_submodule_commit ?= public-submodule-commit
+check: $(gl_public_submodule_commit)
+
+.PHONY: alpha beta stable release
+ALL_RECURSIVE_TARGETS += alpha beta stable
+alpha beta stable: $(local-check) writable-files $(submodule-checks)
+ $(AM_V_GEN)test $@ = stable \
+ && { echo $(VERSION) | $(GREP) -E '^[0-9]+(\.[0-9]+)+$$' \
+ || { echo "invalid version string: $(VERSION)" 1>&2; exit 1;};}\
+ || :
+ $(AM_V_at)$(MAKE) vc-diff-check
+ $(AM_V_at)$(MAKE) news-check
+ $(AM_V_at)$(MAKE) distcheck
+ $(AM_V_at)$(MAKE) dist
+ $(AM_V_at)$(MAKE) $(release-prep-hook) RELEASE_TYPE=$@
+ $(AM_V_at)$(MAKE) -s emit_upload_commands RELEASE_TYPE=$@
+
+release:
+ $(AM_V_GEN)$(MAKE) _version
+ $(AM_V_at)$(MAKE) $(release-type)
+
+# Override this in cfg.mk if you follow different procedures.
+release-prep-hook ?= release-prep
+
+gl_noteworthy_news_ = * Noteworthy changes in release ?.? (????-??-??) [?]
+.PHONY: release-prep
+release-prep:
+ $(AM_V_GEN)$(MAKE) --no-print-directory -s announcement \
+ > ~/announce-$(my_distdir)
+ $(AM_V_at)if test -d $(release_archive_dir); then \
+ ln $(rel-files) $(release_archive_dir); \
+ chmod a-w $(rel-files); \
+ fi
+ $(AM_V_at)echo $(VERSION) > $(prev_version_file)
+ $(AM_V_at)$(MAKE) update-NEWS-hash
+ $(AM_V_at)perl -pi \
+ -e '$$. == 3 and print "$(gl_noteworthy_news_)\n\n\n"' \
+ $(srcdir)/NEWS
+ $(AM_V_at)msg=$$($(emit-commit-log)) || exit 1; \
+ cd $(srcdir) && $(VC) commit -m "$$msg" -a
+
+# Override this with e.g., -s $(srcdir)/some_other_name.texi
+# if the default $(PACKAGE)-derived name doesn't apply.
+gendocs_options_ ?=
+
+.PHONY: web-manual
+web-manual:
+ $(AM_V_GEN)test -z "$(manual_title)" \
+ && { echo define manual_title in cfg.mk 1>&2; exit 1; } || :
+ $(AM_V_at)cd '$(srcdir)/doc'; \
+ $(SHELL) ../$(_build-aux)/gendocs.sh $(gendocs_options_) \
+ -o '$(abs_builddir)/doc/manual' \
+ --email $(PACKAGE_BUGREPORT) $(PACKAGE) \
+ "$(PACKAGE_NAME) - $(manual_title)"
+ $(AM_V_at)echo " *** Upload the doc/manual directory to web-cvs."
+
+.PHONY: web-manual-update
+web-manual-update:
+ $(AM_V_GEN)cd $(srcdir) \
+ && $(_build-aux)/gnu-web-doc-update -C $(abs_builddir)
+
+
+# Code Coverage
+
+init-coverage:
+ $(MAKE) $(AM_MAKEFLAGS) clean
+ lcov --directory . --zerocounters
+
+COVERAGE_CCOPTS ?= "-g --coverage"
+COVERAGE_OUT ?= doc/coverage
+
+build-coverage:
+ $(MAKE) $(AM_MAKEFLAGS) CFLAGS=$(COVERAGE_CCOPTS) CXXFLAGS=$(COVERAGE_CCOPTS)
+ $(MAKE) $(AM_MAKEFLAGS) CFLAGS=$(COVERAGE_CCOPTS) CXXFLAGS=$(COVERAGE_CCOPTS) check
+ mkdir -p $(COVERAGE_OUT)
+ lcov --directory . --output-file $(COVERAGE_OUT)/$(PACKAGE).info \
+ --capture
+
+gen-coverage:
+ genhtml --output-directory $(COVERAGE_OUT) \
+ $(COVERAGE_OUT)/$(PACKAGE).info \
+ --highlight --frames --legend \
+ --title "$(PACKAGE_NAME)"
+
+coverage:
+ $(MAKE) init-coverage
+ $(MAKE) build-coverage
+ $(MAKE) gen-coverage
+
+# Some projects carry local adjustments for gnulib modules via patches in
+# a gnulib patch directory whose default name is gl/ (defined in bootstrap
+# via local_gl_dir=gl). Those patches become stale as the originals evolve
+# in gnulib. Use this rule to refresh any stale patches. It applies each
+# patch to the original in $(gnulib_dir) and uses the temporary result to
+# generate a fuzz-free .diff file. If you customize the name of your local
+# gnulib patch directory via bootstrap.conf, this rule detects that name.
+# Run this from a non-VPATH (i.e., srcdir) build directory.
+.PHONY: refresh-gnulib-patches
+refresh-gnulib-patches:
+ gl=gl; \
+ if test -f bootstrap.conf; then \
+ t=$$(perl -lne '/^\s*local_gl_dir=(\S+)/ and $$d=$$1;' \
+ -e 'END{defined $$d and print $$d}' bootstrap.conf); \
+ test -n "$$t" && gl=$$t; \
+ fi; \
+ for diff in $$(cd $$gl; git ls-files | $(GREP) '\.diff$$'); do \
+ b=$$(printf %s "$$diff"|$(SED) 's/\.diff$$//'); \
+ VERSION_CONTROL=none \
+ patch "$(gnulib_dir)/$$b" "$$gl/$$diff" || exit 1; \
+ ( cd $(gnulib_dir) || exit 1; \
+ git diff "$$b" > "../$$gl/$$diff"; \
+ git checkout $$b ) || exit 1; \
+ done
+
+# Update gettext files.
+PACKAGE ?= $(shell basename $(PWD))
+PO_DOMAIN ?= $(PACKAGE)
+POURL = https://translationproject.org/latest/$(PO_DOMAIN)/
+PODIR ?= po
+refresh-po:
+ rm -f $(PODIR)/*.po && \
+ echo "$(ME): getting translations into po (please ignore the robots.txt ERROR 404)..." && \
+ wget --no-verbose --directory-prefix $(PODIR) --no-directories --recursive --level 1 --accept .po --accept .po.1 $(POURL) && \
+ echo 'en@boldquot' > $(PODIR)/LINGUAS && \
+ echo 'en@quot' >> $(PODIR)/LINGUAS && \
+ ls $(PODIR)/*.po | $(SED) 's/\.po//;s,$(PODIR)/,,' | \
+ sort >> $(PODIR)/LINGUAS
+
+# Indentation
+
+indent_args ?= -ppi 1
+C_SOURCES ?= $$($(VC_LIST_EXCEPT) | grep '\.[ch]\(.in\)\?$$')
+INDENT_SOURCES ?= $(C_SOURCES)
+exclude_file_name_regexp--indent ?= $(exclude_file_name_regexp--sc_indent)
+
+.PHONY: indent
+indent: # Running indent once is not idempotent, but running it twice is.
+ $(AM_V_GEN)indent $(indent_args) $(INDENT_SOURCES) && \
+ indent $(indent_args) $(INDENT_SOURCES)
+
+sc_indent:
+ @if ! indent --version 2> /dev/null | grep 'GNU indent' > /dev/null; then \
+ echo 1>&2 '$(ME): sc_indent: GNU indent is missing'; \
+ else \
+ fail=0; files="$(INDENT_SOURCES)"; \
+ for f in $$files; do \
+ indent $(indent_args) -st $$f \
+ | indent $(indent_args) -st - \
+ | diff -u $$f - || fail=1; \
+ done; \
+ test $$fail = 1 && \
+ { echo 1>&2 '$(ME): code format error, try "make indent"'; \
+ exit 1; } || :; \
+ fi
+
+# If you want to set UPDATE_COPYRIGHT_* environment variables,
+# put the assignments in this variable.
+update-copyright-env ?=
+
+# Run this rule once per year (usually early in January)
+# to update all FSF copyright year lists in your project.
+# If you have an additional project-specific rule,
+# add it in cfg.mk along with a line 'update-copyright: prereq'.
+# By default, exclude all variants of COPYING; you can also
+# add exemptions (such as ChangeLog..* for rotated change logs)
+# in the file .x-update-copyright.
+.PHONY: update-copyright
+update-copyright:
+ $(AM_V_GEN)$(GREP) -l -w Copyright \
+ $$(export VC_LIST_EXCEPT_DEFAULT=COPYING && $(VC_LIST_EXCEPT)) \
+ | $(update-copyright-env) xargs $(srcdir)/$(_build-aux)/$@
+
+# This tight_scope test is skipped with a warning if $(_gl_TS_headers) is not
+# overridden and $(_gl_TS_dir)/Makefile.am does not mention noinst_HEADERS.
+
+# NOTE: to override any _gl_TS_* default value, you must
+# define the variable(s) using "export" in cfg.mk.
+_gl_TS_dir ?= src
+
+ALL_RECURSIVE_TARGETS += sc_tight_scope
+sc_tight_scope: tight-scope.mk
+ @fail=0; \
+ if ! $(GREP) '^ *export _gl_TS_headers *=' $(srcdir)/cfg.mk \
+ > /dev/null \
+ && ! $(GREP) -w noinst_HEADERS $(srcdir)/$(_gl_TS_dir)/Makefile.am \
+ > /dev/null 2>&1; then \
+ echo '$(ME): skipping $@'; \
+ else \
+ $(MAKE) -s -C $(_gl_TS_dir) \
+ -f Makefile \
+ -f $(abs_top_srcdir)/cfg.mk \
+ -f $(abs_top_builddir)/$< \
+ _gl_tight_scope \
+ || fail=1; \
+ fi; \
+ rm -f $<; \
+ exit $$fail
+
+tight-scope.mk: $(ME)
+ @perl -ne '/^# TS-start/.../^# TS-end/ and print' $(srcdir)/$(ME) > $@-t
+ @mv $@-t $@
+
+ifeq (a,b)
+# TS-start
+
+# Most functions should have static scope.
+# Any that don't must be marked with 'extern', but 'main'
+# and 'usage' are exceptions: they're always extern, but
+# do not need to be marked. Symbols matching '__.*' are
+# reserved by the compiler, so are automatically excluded below.
+_gl_TS_unmarked_extern_functions ?= main usage
+_gl_TS_function_match ?= /^(?:$(_gl_TS_extern)) +.*?(\w+) *\(/
+
+# If your project uses a macro like "XTERN", then put
+# the following in cfg.mk to override this default:
+# export _gl_TS_extern = extern|XTERN
+_gl_TS_extern ?= extern
+
+# The second nm|grep checks for file-scope variables with 'extern' scope.
+# Without gnulib's progname module, you might put program_name here.
+# Symbols matching '__.*' are reserved by the compiler,
+# so are automatically excluded below.
+_gl_TS_unmarked_extern_vars ?=
+
+# NOTE: the _match variables are perl expressions -- not mere regular
+# expressions -- so that you can extend them to match other patterns
+# and easily extract matched variable names.
+# For example, if your project declares some global variables via
+# a macro like this: GLOBAL(type, var_name, initializer), then you
+# can override this definition to automatically extract those names:
+# export _gl_TS_var_match = \
+# /^(?:$(_gl_TS_extern)) .*?\**(\w+)(\[.*?])?;/ || /\bGLOBAL\(.*?,\s*(.*?),/
+_gl_TS_var_match ?= /^(?:$(_gl_TS_extern)) .*?(\w+)(\[.*?])?;/
+
+# The names of object files in (or relative to) $(_gl_TS_dir).
+_gl_TS_obj_files ?= *.$(OBJEXT)
+
+# Files in which to search for the one-line style extern declarations.
+# $(_gl_TS_dir)-relative.
+_gl_TS_headers ?= $(noinst_HEADERS)
+_gl_TS_other_headers ?= *.h
+
+.PHONY: _gl_tight_scope
+_gl_tight_scope: $(bin_PROGRAMS)
+ sed_wrap='s/^/^_?/;s/$$/$$/'; \
+ t=exceptions-$$$$; \
+ trap 's=$$?; rm -f $$t; exit $$s' 0; \
+ for sig in 1 2 3 13 15; do \
+ eval "trap 'v=`expr $$sig + 128`; (exit $$v); exit $$v' $$sig"; \
+ done; \
+ src=`for f in $(SOURCES); do \
+ test -f $$f && d= || d=$(srcdir)/; echo $$d$$f; done`; \
+ hdr=`for f in $(_gl_TS_headers); do \
+ test -f $$f && d= || d=$(srcdir)/; echo $$d$$f; done`; \
+ ( printf '%s\n' '__.*' $(_gl_TS_unmarked_extern_functions); \
+ $(GREP) -h -A1 '^extern .*[^;]$$' $$src \
+ | $(GREP) -vE '^(extern |--|#)' | $(SED) 's/ .*//; /^$$/d'; \
+ perl -lne \
+ '$(_gl_TS_function_match) and print $$1' $$hdr; \
+ ) | sort -u | $(SED) "$$sed_wrap" > $$t; \
+ nm -g $(_gl_TS_obj_files)|$(SED) -n 's/.* T //p'|$(GREP) -Ev -f $$t \
+ && { echo the above functions should have static scope >&2; \
+ exit 1; } || : ; \
+ ( printf '%s\n' '__.*' main $(_gl_TS_unmarked_extern_vars); \
+ perl -lne '$(_gl_TS_var_match) and print $$1' \
+ $$hdr $(_gl_TS_other_headers) \
+ ) | sort -u | $(SED) "$$sed_wrap" > $$t; \
+ nm -g $(_gl_TS_obj_files) | $(SED) -n 's/.* [BCDGRS] //p' \
+ | sort -u | $(GREP) -Ev -f $$t \
+ && { echo the above variables should have static scope >&2; \
+ exit 1; } || :
+# TS-end
+endif
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..4693fec
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,8 @@
# Smoke-test binary, built only by "make check".
check_PROGRAMS = %D%/json-lexer

# %D% expands to this fragment's directory, %C% to its canonicalized
# form, when included from the top-level Makefile.am (automake's
# include mechanism).  Headers are listed so they end up in the dist.
%C%_json_lexer_SOURCES = \
	%D%/json-lexer.c \
	%D%/liballoc/allocator.h \
	%D%/libjson/neoas-json-lexer.h \
	%D%/libjson/neoas-json-parser.h \
	%D%/libjson/neoas-json-lexer-and-parser.h
diff --git a/src/json-lexer.c b/src/json-lexer.c
new file mode 100644
index 0000000..4357d1a
--- /dev/null
+++ b/src/json-lexer.c
@@ -0,0 +1,282 @@
+#include <config.h>
+#include "libjson/neoas-json-lexer-and-parser.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <stdbool.h>
+
+static inline void *checked_alloc (void *, size_t n, size_t type);
+static inline void *checked_realloc (void *, void *allocated, size_t n,
+ size_t type);
+static inline void checked_free (void *, void *memory);
+static inline void assert_everything_freed (void *);
+
/* Upper bound on simultaneous live allocations the registry tracks. */
#define MAX_ALLOCS 64

/* Largest single allocation (in bytes) the test allocator grants. */
#define MAX_ALLOC_BYTES 8192

/* Book-keeping for every allocation handed out through checked_alloc /
   checked_realloc, so the test can prove at exit that everything was
   released (see assert_everything_freed). */
struct alloc_registry
{
  size_t n_allocated;          /* number of live allocations */
  void *addresses[MAX_ALLOCS]; /* base pointer of each live allocation */
  size_t sizes[MAX_ALLOCS];    /* byte size of each live allocation */
};

/* Allocate a zeroed array of N objects of TYPE bytes and register it.
   Returns NULL when the registry is full, when N * TYPE overflows or
   reaches MAX_ALLOC_BYTES, or when calloc fails.  The overflow check
   runs before the multiplication so the recorded size is exact. */
static inline void *
checked_alloc (void *ctx, size_t n, size_t type)
{
  struct alloc_registry *registry = ctx;
  if (registry->n_allocated >= MAX_ALLOCS)
    {
      return NULL;
    }
  if (type != 0 && n > (size_t) MAX_ALLOC_BYTES / type)
    {
      /* n * type would overflow or exceed the cap. */
      return NULL;
    }
  size_t total = n * type;
  if (total >= MAX_ALLOC_BYTES)
    {
      return NULL;
    }
  void *mem = calloc (n, type);
  if (mem == NULL)
    {
      return NULL;
    }
  registry->addresses[registry->n_allocated] = mem;
  registry->sizes[registry->n_allocated] = total;
  registry->n_allocated++;
  return mem;
}

/* Resize a registered allocation.  MEM == NULL behaves like a fresh
   allocation (mirroring realloc's contract).  Returns NULL — leaving
   MEM valid — when N * TYPE overflows (realloc does not check the
   product itself) or when realloc fails.  Aborts if MEM is not in the
   registry. */
static inline void *
checked_realloc (void *ctx, void *mem, size_t n, size_t type)
{
  struct alloc_registry *registry = ctx;
  if (mem == NULL)
    {
      return checked_alloc (ctx, n, type);
    }
  if (type != 0 && n > (size_t) -1 / type)
    {
      /* n * type wraps; realloc would silently return an undersized
         buffer. */
      return NULL;
    }
  for (size_t i = 0; i < registry->n_allocated; i++)
    {
      if (registry->addresses[i] == mem)
        {
          void *newmem = realloc (mem, n * type);
          if (newmem == NULL)
            {
              return NULL;
            }
          registry->addresses[i] = newmem;
          registry->sizes[i] = n * type;
          return newmem;
        }
    }
  assert (false);
  return NULL;                  /* not reached; keeps NDEBUG builds defined */
}

/* Release a registered allocation (NULL is a no-op).  The freed slot is
   backfilled with the last entry.  Aborts on an unknown pointer
   (double free or foreign pointer). */
static inline void
checked_free (void *ctx, void *mem)
{
  struct alloc_registry *registry = ctx;
  if (mem == NULL)
    {
      return;
    }
  for (size_t i = 0; i < registry->n_allocated; i++)
    {
      if (registry->addresses[i] == mem)
        {
          free (registry->addresses[i]);
          registry->n_allocated--;
          registry->addresses[i] = registry->addresses[registry->n_allocated];
          registry->sizes[i] = registry->sizes[registry->n_allocated];
          return;
        }
    }
  assert (false);
}

/* Abort unless every allocation has been returned. */
static inline void
assert_everything_freed (void *ctx)
{
  struct alloc_registry *registry = ctx;
  assert (registry->n_allocated == 0);
}
+
/* Report a JSON null token.  Always succeeds. */
static inline int
handle_null (void *ctx)
{
  assert (ctx == NULL);
  puts ("Null");
  return 0;
}
+
/* Report a boolean token as 'T' or 'F'.  Always succeeds. */
static inline int
handle_bool (void *ctx, bool value)
{
  assert (ctx == NULL);
  char tag = 'F';
  if (value)
    {
      tag = 'T';
    }
  printf ("Boolean: %c\n", tag);
  return 0;
}
+
/* Report a number token with %g formatting.  Always succeeds. */
static inline int
handle_number (void *ctx, double value)
{
  assert (ctx == NULL);
  fputs ("Number: ", stdout);
  printf ("%g\n", value);
  return 0;
}
+
/* Report a string token.  Control bytes, CR and LF (and, on
   signed-char platforms, any byte >= 0x80, which compares below 32)
   are escaped as "\NNN" with the decimal byte value.  Always
   succeeds. */
static inline int
handle_string (void *ctx, size_t len, const char *str)
{
  assert (ctx == NULL);
  /* %zu: len is a size_t; %lu is only correct where size_t happens to
     be unsigned long. */
  printf ("String (%zu): ", len);
  for (size_t i = 0; i < len; i++)
    {
      if (str[i] < 32 || str[i] == '\r' || str[i] == '\n')
        {
          printf ("\\%d", (int) ((uint8_t) str[i]));
        }
      else
        {
          printf ("%c", str[i]);
        }
    }
  printf ("\n");
  return 0;
}
+
/* Report the start of an object; this test keeps no per-object state,
   so *user_data is cleared.  Always succeeds. */
static inline int
handle_object_start (void *ctx, void **user_data)
{
  assert (ctx == NULL);
  puts ("Open object");
  *user_data = NULL;
  return 0;
}
+
/* Report that parsing moved to object key KEY (LEN bytes).  Control
   bytes are escaped as "\NNN", mirroring handle_string.  Always
   succeeds. */
static inline int
object_next_key (void *ctx, void *user_data, size_t len, const char *key)
{
  assert (ctx == NULL);
  assert (user_data == NULL);
  /* %zu: len is a size_t (the original %lu is UB where
     size_t != unsigned long). */
  printf ("Switch to key (%zu): ", len);
  for (size_t i = 0; i < len; i++)
    {
      if (key[i] < 32 || key[i] == '\r' || key[i] == '\n')
        {
          printf ("\\%d", (int) ((uint8_t) key[i]));
        }
      else
        {
          printf ("%c", key[i]);
        }
    }
  printf ("\n");
  return 0;
}
+
/* Report the end of an object.  Always succeeds. */
static inline int
handle_object_stop (void *ctx, void *user_data)
{
  assert (ctx == NULL);
  assert (user_data == NULL);
  puts ("Close object");
  return 0;
}
+
/* Report the start of an array; no per-array state is kept, so
   *user_data is cleared.  Always succeeds. */
static inline int
handle_array_start (void *ctx, void **user_data)
{
  assert (ctx == NULL);
  puts ("Open array");
  *user_data = NULL;
  return 0;
}
+
/* Report that parsing moved to array element INDEX.  Always
   succeeds. */
static inline int
array_next_key (void *ctx, void *user_data, size_t index)
{
  assert (ctx == NULL);
  assert (user_data == NULL);
  /* %zu: index is a size_t (the original %lu is only correct where
     size_t == unsigned long). */
  printf ("Switch to array key (%zu)\n", index);
  return 0;
}
+
/* Report the end of an array.  Always succeeds. */
static inline int
handle_array_stop (void *ctx, void *user_data)
{
  assert (ctx == NULL);
  assert (user_data == NULL);
  puts ("Close array");
  return 0;
}
+
/* Report a "next element" separator event.
   NOTE(review): defined but not passed to json_lexer_and_parser_lex in
   main below — presumably consumed by the parser stage; confirm. */
static inline int
handle_next (void *ctx)
{
  assert (ctx == NULL);
  puts ("Next");
  return 0;
}
+
/* Report a lexical (syntax) error.  Returning 0 lets processing
   continue. */
static inline int
handle_syntax_error (void *ctx)
{
  assert (ctx == NULL);
  puts ("Syntax error");
  return 0;
}
+
/* Report a grammatical (parser-level) error.  Returning 0 lets
   processing continue. */
static inline int
handle_grammar_error (void *ctx)
{
  assert (ctx == NULL);
  puts ("Grammar error");
  return 0;
}
+
/* Report a character the lexer refused to consume. */
static inline int
handle_rejected_input (void *ctx, char c)
{
  assert (ctx == NULL);
  fputs ("Rejected input: ", stdout);
  printf ("%c\n", c);
  return 0;
}
+
/* Pull callback: fetch one byte from stdin into DATA.  Returns 1 when
   a byte was stored, 0 at end of input (REQUEST is at least 1 but at
   most one byte is delivered per call). */
static inline ssize_t
do_pull (void *ctx, size_t request, void *data)
{
  assert (ctx == NULL);
  assert (request >= 1);
  int c = getchar ();
  if (c < 0)
    {
      return 0;                 /* EOF or read error */
    }
  *(uint8_t *) data = (uint8_t) c;
  return 1;
}
+
/* Smoke test: lex and parse JSON read from stdin, printing one line
   per event.  The checked allocator records every allocation so the
   final assert_everything_freed catches leaks.  The exit status is the
   combined lexer/parser error code (0 on success). */
int
main (int argc, char *argv[])
{
  /* This does not call bindtextdomain(...) because it is not a
     user-facing program. */
  (void) argc;
  (void) argv;
  struct alloc_registry registry = {
    .n_allocated = 0
  };
  /* Route all library allocations through the leak-checking registry. */
  struct memory_allocator allocator = {
    .allocate = checked_alloc,
    .reallocate = checked_realloc,
    .deallocate = checked_free,
    .context = &registry
  };
  /* NOTE(review): handle_next (defined above) is not wired in here —
     presumably the combined lexer+parser consumes "next" events
     internally; confirm against the header. */
  int error = json_lexer_and_parser_lex (&allocator, do_pull, NULL,
                                         handle_null, handle_bool,
                                         handle_number, handle_string,
                                         handle_object_start, object_next_key,
                                         handle_object_stop,
                                         handle_array_start, array_next_key,
                                         handle_array_stop,
                                         handle_syntax_error,
                                         handle_grammar_error,
                                         handle_rejected_input, NULL);
  assert_everything_freed (&registry);
  return error;
}
diff --git a/src/liballoc/allocator.h b/src/liballoc/allocator.h
new file mode 100644
index 0000000..48be179
--- /dev/null
+++ b/src/liballoc/allocator.h
@@ -0,0 +1,141 @@
+#ifndef NEOAS_ALLOCATOR_H_INCLUDED
+# define NEOAS_ALLOCATOR_H_INCLUDED
+
/* A pluggable allocator.  Each callback receives CONTEXT as its first
   argument.  allocate(ctx, n, size) returns storage for n objects of
   size bytes (zeroed in the default, calloc-based implementation);
   reallocate resizes; deallocate releases. */
struct memory_allocator
{
  void *(*allocate) (void *, size_t, size_t);           /* ctx, n, size */
  void *(*reallocate) (void *, void *, size_t, size_t); /* ctx, mem, n, size */
  void (*deallocate) (void *, void *);                  /* ctx, mem */
  void *context;                /* opaque state handed to the callbacks */
};
+
/* Process-wide fallback allocator built on calloc/realloc/free. */
static inline const struct memory_allocator *default_memory_allocator (void);

/* Allocate/resize/release through ALLOCATOR (NULL selects the default
   allocator).  The int-returning functions yield 0 on success and -1
   on failure. */
static inline int safe_allocate (const struct memory_allocator *allocator,
                                 size_t n, size_t size, void **allocated);

static inline int safe_reallocate (const struct memory_allocator *allocator,
                                   void **memory, size_t n, size_t size);

static inline void safe_deallocate (const struct memory_allocator *allocator,
                                    void **allocated);

/* Copy SRC (or the default allocator when SRC is NULL) into DEST. */
static inline void copy_allocator (struct memory_allocator *dest,
                                   const struct memory_allocator *src);

# include <stdlib.h>

/* Convenience wrappers.  They expect a variable named `allocator' of
   type `const struct memory_allocator *' to be in scope at the point
   of use, and derive the element size from the pointer itself. */
# define ALLOCATE(ptr, n) \
  (safe_allocate (allocator, n, sizeof (*ptr), (void **) (&ptr)))

# define REALLOCATE(ptr, n) \
  (safe_reallocate (allocator, (void **) (&ptr), n, sizeof (*ptr)))

# define DEALLOCATE(ptr) \
  (safe_deallocate (allocator, (void **) (&ptr)))
+
/* Default `allocate' callback: zero-initialized storage via calloc
   (which also overflow-checks n * type).  The context is unused. */
static void *
default_allocate (void *unused, size_t n, size_t type)
{
  (void) unused;
  return calloc (n, type);
}
+
/* Default `reallocate' callback.  Unlike calloc, realloc takes a
   single byte count and does not overflow-check a product, so n * type
   is verified here first; a wrapped product would otherwise yield a
   silently undersized buffer.  Returns NULL on overflow or realloc
   failure (MEM stays valid).  The context is unused. */
static void *
default_reallocate (void *unused, void *mem, size_t n, size_t type)
{
  (void) unused;
  if (type != 0 && n > (size_t) -1 / type)
    {
      return NULL;
    }
  return realloc (mem, n * type);
}
+
/* Default `deallocate' callback: plain free; NULL is a no-op.  The
   context is unused. */
static void
default_deallocate (void *unused, void *mem)
{
  (void) unused;
  free (mem);
}
+
+static inline const struct memory_allocator *
+default_memory_allocator (void)
+{
+ static const struct memory_allocator alloc = {
+ .allocate = default_allocate,
+ .reallocate = default_reallocate,
+ .deallocate = default_deallocate,
+ .context = NULL
+ };
+ return &alloc;
+}
+
+static inline int
+safe_allocate (const struct memory_allocator *allocator,
+ size_t n, size_t size, void **allocated)
+{
+ if (allocator == NULL)
+ {
+ allocator = default_memory_allocator ();
+ }
+ *allocated = allocator->allocate ((void *) allocator->context, n, size);
+ if (*allocated == NULL)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+static inline int
+safe_reallocate (const struct memory_allocator *allocator,
+ void **memory, size_t n, size_t size)
+{
+ if (allocator == NULL)
+ {
+ allocator = default_memory_allocator ();
+ }
+ if (n == 0)
+ {
+ n = 1;
+ }
+ if (size == 0)
+ {
+ size = 1;
+ }
+ void *reallocated =
+ allocator->reallocate (allocator->context, *memory, n, size);
+ if (reallocated == NULL)
+ {
+ return -1;
+ }
+ *memory = reallocated;
+ return 0;
+}
+
+static inline void
+safe_deallocate (const struct memory_allocator *allocator, void **allocated)
+{
+ if (allocator == NULL)
+ {
+ allocator = default_memory_allocator ();
+ }
+ void *ptr = *allocated;
+ if (ptr)
+ {
+ allocator->deallocate (allocator->context, ptr);
+ }
+ *allocated = NULL;
+}
+
+static inline void
+copy_allocator (struct memory_allocator *dest,
+ const struct memory_allocator *src)
+{
+ if (src == NULL)
+ {
+ src = default_memory_allocator ();
+ }
+ dest->allocate = src->allocate;
+ dest->reallocate = src->reallocate;
+ dest->deallocate = src->deallocate;
+ dest->context = src->context;
+}
+
+#endif /* not NEOAS_ALLOCATOR_H_INCLUDED */
diff --git a/src/libjson/neoas-json-lexer-and-parser.h b/src/libjson/neoas-json-lexer-and-parser.h
new file mode 100644
index 0000000..81d204c
--- /dev/null
+++ b/src/libjson/neoas-json-lexer-and-parser.h
@@ -0,0 +1,551 @@
+#ifndef NEOAS_JSON_LEXER_AND_PARSER_H_INCLUDED
+# define NEOAS_JSON_LEXER_AND_PARSER_H_INCLUDED
+
+# include <stdbool.h>
+# include <stddef.h>
+# include <sys/types.h>
+
+struct memory_allocator;
+
+struct json_lexer_and_parser;
+
+static inline int json_lexer_and_parser_init (const struct memory_allocator
+ *allocator,
+ struct json_lexer_and_parser
+ *lexer_and_parser);
+
+static inline void json_lexer_and_parser_deinit (struct json_lexer_and_parser
+ *lexer_and_parser);
+
+static inline void json_lexer_and_parser_set (struct json_lexer_and_parser
+ *lexer_and_parser,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *,
+ bool),
+ int (*handle_number) (void *,
+ double),
+ int (*handle_string) (void *,
+ size_t,
+ const char
+ *),
+ int (*handle_object_start) (void
+ *,
+ void
+ **),
+ int (*object_next_key) (void *,
+ void *,
+ size_t,
+ const
+ char *),
+ int (*handle_object_stop) (void
+ *,
+ void
+ *),
+ int (*handle_array_start) (void
+ *,
+ void
+ **),
+ int (*array_next_key) (void *,
+ void *,
+ size_t),
+ int (*handle_array_stop) (void
+ *,
+ void
+ *),
+ int (*handle_syntax_error) (void
+ *),
+ int (*handle_grammar_error)
+ (void *),
+ int (*handle_rejected_input)
+ (void *, char c),
+ void *context);
+
+static inline int json_lexer_and_parser_push (struct json_lexer_and_parser
+ *lexer_and_parser, char c);
+
+static inline int json_lexer_and_parser_terminate (struct
+ json_lexer_and_parser
+ *lexer_and_parser);
+
+static inline int json_lexer_and_parser_lex (const struct memory_allocator
+ *allocator,
+ ssize_t (*pull) (void *,
+ size_t request,
+ void *data),
+ void *pull_context,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *,
+ bool),
+ int (*handle_number) (void *,
+ double),
+ int (*handle_string) (void *,
+ size_t,
+ const char
+ *),
+ int (*handle_object_start) (void
+ *,
+ void
+ **),
+ int (*object_next_key) (void *,
+ void *,
+ size_t,
+ const
+ char *),
+ int (*handle_object_stop) (void
+ *,
+ void
+ *),
+ int (*handle_array_start) (void
+ *,
+ void
+ **),
+ int (*array_next_key) (void *,
+ void *,
+ size_t),
+ int (*handle_array_stop) (void *,
+ void
+ *),
+ int (*handle_syntax_error) (void
+ *),
+ int (*handle_grammar_error) (void
+ *),
+ int (*handle_rejected_input)
+ (void *, char c), void *context);
+
+static inline int json_lexer_and_parser_string (const struct memory_allocator
+ *allocator, size_t n,
+ const char *str,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *,
+ bool),
+ int (*handle_number) (void *,
+ double),
+ int (*handle_string) (void *,
+ size_t,
+ const
+ char *),
+ int (*handle_object_start)
+ (void *, void **),
+ int (*object_next_key) (void
+ *,
+ void
+ *,
+ size_t,
+ const
+ char
+ *),
+ int (*handle_object_stop)
+ (void *, void *),
+ int (*handle_array_start)
+ (void *, void **),
+ int (*array_next_key) (void *,
+ void *,
+ size_t),
+ int (*handle_array_stop) (void
+ *,
+ void
+ *),
+ int (*handle_syntax_error)
+ (void *),
+ int (*handle_grammar_error)
+ (void *),
+ int (*handle_rejected_input)
+ (void *, char c),
+ void *context);
+
+# include "neoas-json-lexer.h"
+# include "neoas-json-parser.h"
+
/* Combined driver: a json_lexer whose token callbacks feed a
   json_parser, whose structural callbacks in turn invoke the
   user-supplied handlers below with the user CONTEXT. */
struct json_lexer_and_parser
{
  struct json_lexer lexer;      /* tokenizing stage */
  struct json_parser parser;    /* grammar stage */
  /* User callbacks, forwarded by the parser-side adapters below. */
  int (*handle_null) (void *);
  int (*handle_bool) (void *, bool);
  int (*handle_number) (void *, double);
  int (*handle_string) (void *, size_t, const char *);
  int (*handle_object_start) (void *, void **);
  int (*object_next_key) (void *, void *, size_t, const char *);
  int (*handle_object_stop) (void *, void *);
  int (*handle_array_start) (void *, void **);
  int (*array_next_key) (void *, void *, size_t);
  int (*handle_array_stop) (void *, void *);
  int (*handle_syntax_error) (void *);
  int (*handle_grammar_error) (void *);
  int (*handle_rejected_input) (void *, char c);
  void *context;                /* opaque state passed to every handler */
};
+
+static inline int
+json_lexer_and_parser_init (const struct memory_allocator *allocator,
+ struct json_lexer_and_parser *lexer_and_parser)
+{
+ int l = json_lexer_init (allocator, &(lexer_and_parser->lexer));
+ int p = json_parser_init (allocator, &(lexer_and_parser->parser));
+ if (l || p)
+ {
+ json_lexer_deinit (&(lexer_and_parser->lexer));
+ json_parser_deinit (&(lexer_and_parser->parser));
+ return -1;
+ }
+ return 0;
+}
+
/* Release the resources of both stages.  Safe to call after a
   successful json_lexer_and_parser_init. */
static inline void
json_lexer_and_parser_deinit (struct json_lexer_and_parser *lexer_and_parser)
{
  json_lexer_deinit (&(lexer_and_parser->lexer));
  json_parser_deinit (&(lexer_and_parser->parser));
}
+
/* --- Lexer-side adapters -------------------------------------------
   Installed as the json_lexer callbacks with the enclosing
   struct json_lexer_and_parser as CONTEXT.  Token events are pushed
   into the parser stage; syntax errors and rejected input bypass the
   parser and go straight to the user handlers. */

static inline int
json_lexer_and_parser_lexer_handle_null (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_null (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_bool (void *context, bool value)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_bool (&(lp->parser), value);
}

static inline int
json_lexer_and_parser_lexer_handle_number (void *context, double value)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_number (&(lp->parser), value);
}

static inline int
json_lexer_and_parser_lexer_handle_string (void *context, size_t len,
                                           const char *str)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_string (&(lp->parser), len, str);
}

static inline int
json_lexer_and_parser_lexer_handle_object_start (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_object_start (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_object_assoc (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_object_assoc (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_object_stop (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_object_stop (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_array_start (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_array_start (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_array_stop (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_array_stop (&(lp->parser));
}

static inline int
json_lexer_and_parser_lexer_handle_next (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return json_parser_push_next (&(lp->parser));
}

/* Syntax errors are a lexer concern: forwarded directly to the user. */
static inline int
json_lexer_and_parser_lexer_handle_syntax_error (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_syntax_error (lp->context);
}

static inline int
json_lexer_and_parser_lexer_handle_rejected_input (void *context, char c)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_rejected_input (lp->context, c);
}
+
/* --- Parser-side adapters ------------------------------------------
   Installed as the json_parser callbacks; each unwraps the combined
   state and invokes the matching user handler with the user
   CONTEXT. */

static inline int
json_lexer_and_parser_parser_handle_null (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_null (lp->context);
}

static inline int
json_lexer_and_parser_parser_handle_bool (void *context, bool value)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_bool (lp->context, value);
}

static inline int
json_lexer_and_parser_parser_handle_number (void *context, double value)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_number (lp->context, value);
}

static inline int
json_lexer_and_parser_parser_handle_string (void *context, size_t len,
                                            const char *str)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_string (lp->context, len, str);
}

static inline int
json_lexer_and_parser_parser_handle_object_start (void *context,
                                                  void **user_data)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_object_start (lp->context, user_data);
}

static inline int
json_lexer_and_parser_parser_handle_object_next_key (void *context,
                                                     void *user_data,
                                                     size_t len,
                                                     const char *key)
{
  struct json_lexer_and_parser *lp = context;
  return lp->object_next_key (lp->context, user_data, len, key);
}

static inline int
json_lexer_and_parser_parser_handle_object_stop (void *context,
                                                 void *user_data)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_object_stop (lp->context, user_data);
}

static inline int
json_lexer_and_parser_parser_handle_array_start (void *context,
                                                 void **user_data)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_array_start (lp->context, user_data);
}

static inline int
json_lexer_and_parser_parser_handle_array_next_key (void *context,
                                                    void *user_data,
                                                    size_t index)
{
  struct json_lexer_and_parser *lp = context;
  return lp->array_next_key (lp->context, user_data, index);
}

static inline int
json_lexer_and_parser_parser_handle_array_stop (void *context,
                                                void *user_data)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_array_stop (lp->context, user_data);
}

static inline int
json_lexer_and_parser_parser_handle_grammar_error (void *context)
{
  struct json_lexer_and_parser *lp = context;
  return lp->handle_grammar_error (lp->context);
}
+
/* Record the user callbacks and CONTEXT, then wire the internal
   adapters into both stages: the lexer feeds the parser, the parser
   feeds the user handlers.  Must be called after
   json_lexer_and_parser_init and before pushing input. */
static inline void
json_lexer_and_parser_set (struct json_lexer_and_parser *lexer_and_parser,
                           int (*handle_null) (void *),
                           int (*handle_bool) (void *, bool),
                           int (*handle_number) (void *, double),
                           int (*handle_string) (void *, size_t,
                                                 const char *),
                           int (*handle_object_start) (void *, void **),
                           int (*object_next_key) (void *, void *, size_t,
                                                   const char *),
                           int (*handle_object_stop) (void *, void *),
                           int (*handle_array_start) (void *, void **),
                           int (*array_next_key) (void *, void *, size_t),
                           int (*handle_array_stop) (void *, void *),
                           int (*handle_syntax_error) (void *),
                           int (*handle_grammar_error) (void *),
                           int (*handle_rejected_input) (void *, char c),
                           void *context)
{
  /* Stash the user callbacks; the adapters read them back at event
     time. */
  lexer_and_parser->handle_null = handle_null;
  lexer_and_parser->handle_bool = handle_bool;
  lexer_and_parser->handle_number = handle_number;
  lexer_and_parser->handle_string = handle_string;
  lexer_and_parser->handle_object_start = handle_object_start;
  lexer_and_parser->object_next_key = object_next_key;
  lexer_and_parser->handle_object_stop = handle_object_stop;
  lexer_and_parser->handle_array_start = handle_array_start;
  lexer_and_parser->array_next_key = array_next_key;
  lexer_and_parser->handle_array_stop = handle_array_stop;
  lexer_and_parser->handle_syntax_error = handle_syntax_error;
  lexer_and_parser->handle_grammar_error = handle_grammar_error;
  lexer_and_parser->handle_rejected_input = handle_rejected_input;
  lexer_and_parser->context = context;
  /* Lexer events -> parser stage (context is the combined struct). */
  json_lexer_set (&(lexer_and_parser->lexer),
                  json_lexer_and_parser_lexer_handle_null,
                  json_lexer_and_parser_lexer_handle_bool,
                  json_lexer_and_parser_lexer_handle_number,
                  json_lexer_and_parser_lexer_handle_string,
                  json_lexer_and_parser_lexer_handle_object_start,
                  json_lexer_and_parser_lexer_handle_object_assoc,
                  json_lexer_and_parser_lexer_handle_object_stop,
                  json_lexer_and_parser_lexer_handle_array_start,
                  json_lexer_and_parser_lexer_handle_array_stop,
                  json_lexer_and_parser_lexer_handle_next,
                  json_lexer_and_parser_lexer_handle_syntax_error,
                  json_lexer_and_parser_lexer_handle_rejected_input,
                  lexer_and_parser);
  /* Parser events -> user handlers. */
  json_parser_set (&(lexer_and_parser->parser),
                   json_lexer_and_parser_parser_handle_null,
                   json_lexer_and_parser_parser_handle_bool,
                   json_lexer_and_parser_parser_handle_number,
                   json_lexer_and_parser_parser_handle_string,
                   json_lexer_and_parser_parser_handle_object_start,
                   json_lexer_and_parser_parser_handle_object_next_key,
                   json_lexer_and_parser_parser_handle_object_stop,
                   json_lexer_and_parser_parser_handle_array_start,
                   json_lexer_and_parser_parser_handle_array_next_key,
                   json_lexer_and_parser_parser_handle_array_stop,
                   json_lexer_and_parser_parser_handle_grammar_error,
                   lexer_and_parser);
}
+
/* Feed one input character; events fire synchronously through the
   installed handlers.  Returns the lexer's push result. */
static inline int
json_lexer_and_parser_push (struct json_lexer_and_parser *lexer_and_parser,
                            char c)
{
  return json_lexer_push (&(lexer_and_parser->lexer), c);
}
+
/* Signal end of input to the lexer (flushing any pending token).
   Returns the lexer's terminate result. */
static inline int
json_lexer_and_parser_terminate (struct json_lexer_and_parser
                                 *lexer_and_parser)
{
  return json_lexer_terminate (&(lexer_and_parser->lexer));
}
+
/* One-shot convenience: build a temporary combined lexer+parser, drive
   it from PULL (called with PULL_CONTEXT; presumably until it reports
   end of input — see json_lexer_lex), forwarding events to the user
   handlers with CONTEXT.  Returns json_lexer_lex's result (-1 is also
   returned if initialization fails); the temporary state is always
   released before returning. */
static inline int
json_lexer_and_parser_lex (const struct memory_allocator *allocator,
                           ssize_t (*pull) (void *, size_t request,
                                            void *data), void *pull_context,
                           int (*handle_null) (void *),
                           int (*handle_bool) (void *, bool),
                           int (*handle_number) (void *, double),
                           int (*handle_string) (void *, size_t,
                                                 const char *),
                           int (*handle_object_start) (void *, void **),
                           int (*object_next_key) (void *, void *, size_t,
                                                   const char *),
                           int (*handle_object_stop) (void *, void *),
                           int (*handle_array_start) (void *, void **),
                           int (*array_next_key) (void *, void *, size_t),
                           int (*handle_array_stop) (void *, void *),
                           int (*handle_syntax_error) (void *),
                           int (*handle_grammar_error) (void *),
                           int (*handle_rejected_input) (void *, char c),
                           void *context)
{
  struct json_lexer_and_parser lp;
  if (json_lexer_and_parser_init (allocator, &lp) < 0)
    {
      return -1;
    }
  json_lexer_and_parser_set (&lp, handle_null, handle_bool, handle_number,
                             handle_string, handle_object_start,
                             object_next_key, handle_object_stop,
                             handle_array_start, array_next_key,
                             handle_array_stop, handle_syntax_error,
                             handle_grammar_error, handle_rejected_input,
                             context);
  /* Drive json_lexer_lex with the adapter callbacks so the lexer's
     events pass through lp's parser stage. */
  int error = json_lexer_lex (allocator, pull, pull_context,
                              json_lexer_and_parser_lexer_handle_null,
                              json_lexer_and_parser_lexer_handle_bool,
                              json_lexer_and_parser_lexer_handle_number,
                              json_lexer_and_parser_lexer_handle_string,
                              json_lexer_and_parser_lexer_handle_object_start,
                              json_lexer_and_parser_lexer_handle_object_assoc,
                              json_lexer_and_parser_lexer_handle_object_stop,
                              json_lexer_and_parser_lexer_handle_array_start,
                              json_lexer_and_parser_lexer_handle_array_stop,
                              json_lexer_and_parser_lexer_handle_next,
                              json_lexer_and_parser_lexer_handle_syntax_error,
                              json_lexer_and_parser_lexer_handle_rejected_input,
                              &lp);
  json_lexer_and_parser_deinit (&lp);
  return error;
}
+
/* One-shot convenience over an in-memory buffer: lex and parse the N
   bytes at STR (no NUL terminator required — see json_lexer_string),
   forwarding events to the user handlers with CONTEXT.  Returns
   json_lexer_string's result (-1 also on initialization failure); the
   temporary state is always released. */
static inline int
json_lexer_and_parser_string (const struct memory_allocator *allocator,
                              size_t n,
                              const char *str,
                              int (*handle_null) (void *),
                              int (*handle_bool) (void *, bool),
                              int (*handle_number) (void *, double),
                              int (*handle_string) (void *, size_t,
                                                    const char *),
                              int (*handle_object_start) (void *, void **),
                              int (*object_next_key) (void *, void *, size_t,
                                                      const char *),
                              int (*handle_object_stop) (void *, void *),
                              int (*handle_array_start) (void *, void **),
                              int (*array_next_key) (void *, void *, size_t),
                              int (*handle_array_stop) (void *, void *),
                              int (*handle_syntax_error) (void *),
                              int (*handle_grammar_error) (void *),
                              int (*handle_rejected_input) (void *, char c),
                              void *context)
{
  struct json_lexer_and_parser lp;
  if (json_lexer_and_parser_init (allocator, &lp) < 0)
    {
      return -1;
    }
  json_lexer_and_parser_set (&lp, handle_null, handle_bool, handle_number,
                             handle_string, handle_object_start,
                             object_next_key, handle_object_stop,
                             handle_array_start, array_next_key,
                             handle_array_stop, handle_syntax_error,
                             handle_grammar_error, handle_rejected_input,
                             context);
  /* Same adapter wiring as json_lexer_and_parser_lex, but sourced from
     the caller's buffer. */
  int error = json_lexer_string (allocator, n, str,
                                 json_lexer_and_parser_lexer_handle_null,
                                 json_lexer_and_parser_lexer_handle_bool,
                                 json_lexer_and_parser_lexer_handle_number,
                                 json_lexer_and_parser_lexer_handle_string,
                                 json_lexer_and_parser_lexer_handle_object_start,
                                 json_lexer_and_parser_lexer_handle_object_assoc,
                                 json_lexer_and_parser_lexer_handle_object_stop,
                                 json_lexer_and_parser_lexer_handle_array_start,
                                 json_lexer_and_parser_lexer_handle_array_stop,
                                 json_lexer_and_parser_lexer_handle_next,
                                 json_lexer_and_parser_lexer_handle_syntax_error,
                                 json_lexer_and_parser_lexer_handle_rejected_input,
                                 &lp);
  json_lexer_and_parser_deinit (&lp);
  return error;
}
+
+#endif /* NEOAS_JSON_LEXER_AND_PARSER_H_INCLUDED */
diff --git a/src/libjson/neoas-json-lexer.h b/src/libjson/neoas-json-lexer.h
new file mode 100644
index 0000000..d57d3c8
--- /dev/null
+++ b/src/libjson/neoas-json-lexer.h
@@ -0,0 +1,1036 @@
+#ifndef NEOAS_JSON_LEXER_H_INCLUDED
+# define NEOAS_JSON_LEXER_H_INCLUDED
+
+# include <stdbool.h>
+# include <stddef.h>
+# include <stdint.h>
+# include <sys/types.h>
+# include <math.h>
+
+struct memory_allocator;
+
+struct json_lexer;
+
+static inline int json_lexer_init (const struct memory_allocator *allocator,
+ struct json_lexer *lexer);
+
+static inline void json_lexer_deinit (struct json_lexer *lexer);
+
+static inline void json_lexer_set (struct json_lexer *lexer,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool value),
+ int (*handle_number) (void *,
+ double value),
+ int (*handle_string) (void *, size_t len,
+ const char *str),
+ int (*handle_object_start) (void *),
+ int (*handle_object_assoc) (void *),
+ int (*handle_object_stop) (void *),
+ int (*handle_array_start) (void *),
+ int (*handle_array_stop) (void *),
+ int (*handle_next) (void *),
+ int (*handle_syntax_error) (void *),
+ /* Parsing a number stops when a
+ foreign character stops the
+ digit sequence. If the number
+ handler returns non-zero, the
+ foreign character is signalled
+ as a rejected input, so you can
+ resume parsing by pushing it. */
+ int (*handle_rejected_input) (void *,
+ char c),
+ void *context);
+
+static inline int json_lexer_push (struct json_lexer *lexer, char c);
+
+static inline int json_lexer_terminate (struct json_lexer *lexer);
+
+static inline int json_lexer_lex (const struct memory_allocator *allocator,
+ ssize_t (*pull) (void *, size_t request,
+ void *data),
+ void *pull_context,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool value),
+ int (*handle_number) (void *, double value),
+ int (*handle_string) (void *, size_t len,
+ const char *str),
+ int (*handle_object_start) (void *),
+ int (*handle_object_assoc) (void *),
+ int (*handle_object_stop) (void *),
+ int (*handle_array_start) (void *),
+ int (*handle_array_stop) (void *),
+ int (*handle_next) (void *),
+ int (*handle_syntax_error) (void *),
+ int (*handle_rejected_input) (void *,
+ char c),
+ void *context);
+
+static inline int json_lexer_string (const struct memory_allocator *allocator,
+ size_t n,
+ const char *str,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool value),
+ int (*handle_number) (void *,
+ double value),
+ int (*handle_string) (void *, size_t len,
+ const char *str),
+ int (*handle_object_start) (void *),
+ int (*handle_object_assoc) (void *),
+ int (*handle_object_stop) (void *),
+ int (*handle_array_start) (void *),
+ int (*handle_array_stop) (void *),
+ int (*handle_next) (void *),
+ int (*handle_syntax_error) (void *),
+ int (*handle_rejected_input) (void *,
+ char c),
+ void *context);
+
+struct json_lexer_stringbuf;
+
+static inline int json_lexer_stringbuf_init (const struct memory_allocator
+ *allocator,
+ struct json_lexer_stringbuf
+ *str);
+
+static inline void json_lexer_stringbuf_deinit (struct json_lexer_stringbuf
+ *str);
+
+static inline void json_lexer_stringbuf_set (struct json_lexer_stringbuf *str,
+ int (*handle_success) (void *,
+ size_t
+ size,
+ const char
+ *str),
+ int (*handle_error) (void *),
+ void *context);
+
+static inline int json_lexer_stringbuf_push (struct json_lexer_stringbuf *str,
+ char c);
+
+# include "../liballoc/allocator.h"
+
/* Growable buffer accumulating the bytes of a JSON string literal. */
struct json_lexer_stringbuf
{
  size_t max;                   /* allocated capacity of str, in bytes */
  size_t size;                  /* bytes currently stored */
  char *str;                    /* heap buffer owned by this struct */
  uint8_t escape_size;          /* bytes pending in escape_sequence */
  char escape_sequence[6];      /* in-progress escape — presumably up to
                                   \uXXXX (6 chars); confirm in the
                                   push logic */
  int (*handle_success) (void *, size_t, const char *);
  int (*handle_error) (void *);
  void *context;
  /* NOTE(review): const-qualified, yet json_lexer_init writes through
     a cast to it — formally UB; consider dropping the qualifier. */
  const struct memory_allocator allocator;
};
+
+struct json_lexer_number;
+
+static inline void json_lexer_number_set (struct json_lexer_number *nb,
+ int (*handle_success) (void *,
+ double value,
+ char
+ lookahead),
+ int (*handle_error) (void *),
+ void *context);
+
+static inline int json_lexer_number_push (struct json_lexer_number *nb,
+ char c);
+
+static inline int json_lexer_number_terminate (struct json_lexer_number *nb);
+
/* Phases of lexing a JSON number, in the order they appear in the
   input: [-] digits [. digits] [e [+-] digits]. */
enum json_lexer_number_step
{
  LEXER_NUMBER_READ_SIGN,
  LEXER_NUMBER_READ_INTEGER,
  LEXER_NUMBER_READ_FRACTIONAL,
  LEXER_NUMBER_READ_EXPONENT_SIGN,
  LEXER_NUMBER_READ_EXPONENT
};

/* Incremental state of the number sub-lexer. */
struct json_lexer_number
{
  enum json_lexer_number_step step;     /* current phase */
  double sign;                  /* mantissa sign factor — presumably +/-1 */
  double digits;                /* accumulated mantissa digits */
  int64_t extra_exponent;       /* NOTE(review): presumably the decimal-
                                   point adjustment from fractional
                                   digits; confirm in the push logic */
  int64_t exponent_sign;
  uint64_t exponent;            /* magnitude of the explicit exponent */
  /* Success receives the value plus the lookahead character that ended
     the number. */
  int (*handle_success) (void *, double, char);
  int (*handle_error) (void *);
  void *context;
};

/* Top-level lexer states.  The NU/NUL/..., FA/FAL/... and TR/TRU/...
   states track the keywords null, false and true one character at a
   time. */
enum json_lexer_state
{
  JSON_LEXER_READING_WHITESPACE = 0,    /* between tokens */
  JSON_LEXER_READING_STRING,
  JSON_LEXER_READING_NUMBER,
  JSON_LEXER_READING_NU,
  JSON_LEXER_READING_NUL,
  JSON_LEXER_READING_NULL,
  JSON_LEXER_READING_FA,
  JSON_LEXER_READING_FAL,
  JSON_LEXER_READING_FALS,
  JSON_LEXER_READING_FALSE,
  JSON_LEXER_READING_TR,
  JSON_LEXER_READING_TRU,
  JSON_LEXER_READING_TRUE
};
+
/* Incremental JSON tokenizer.  Push characters with json_lexer_push;
   each completed token invokes one of the callbacks below with
   CONTEXT. */
struct json_lexer
{
  /* NOTE(review): const member written through a cast in
     json_lexer_init — formally UB; consider dropping const. */
  const struct memory_allocator allocator;
  enum json_lexer_state state;  /* what the lexer is currently reading */
  struct json_lexer_stringbuf str;      /* string-literal accumulator */
  struct json_lexer_number nb;  /* number accumulator */
  /* Token callbacks. */
  int (*handle_null) (void *);
  int (*handle_bool) (void *, bool value);
  int (*handle_number) (void *, double value);
  int (*handle_string) (void *, size_t len, const char *str);
  int (*handle_object_start) (void *);
  int (*handle_object_assoc) (void *);
  int (*handle_object_stop) (void *);
  int (*handle_array_start) (void *);
  int (*handle_array_stop) (void *);
  int (*handle_next) (void *);
  int (*handle_syntax_error) (void *);
  int (*handle_rejected_input) (void *, char c);
  void *context;                /* opaque state passed to callbacks */
};
+
+# include <assert.h>
+
/* Initialize LEXER: copy ALLOCATOR (NULL selects the default) into the
   lexer, then initialize the string buffer with the same allocator.
   Returns 0 on success, -1 if the buffer initialization fails.
   NOTE(review): lexer->allocator is const-qualified and written
   through a cast here; modifying a const member this way is formally
   undefined behavior — consider removing the qualifier instead. */
static inline int
json_lexer_init (const struct memory_allocator *allocator,
                 struct json_lexer *lexer)
{
  copy_allocator ((struct memory_allocator *) (&(lexer->allocator)),
                  allocator);
  if (json_lexer_stringbuf_init (allocator, &(lexer->str)) != 0)
    {
      return -1;
    }
  /* Both copies must agree on the same callback set. */
  assert (lexer->allocator.allocate == lexer->str.allocator.allocate);
  assert (lexer->allocator.reallocate == lexer->str.allocator.reallocate);
  assert (lexer->allocator.deallocate == lexer->str.allocator.deallocate);
  return 0;
}
+
/* Release the lexer's string buffer.  The asserts re-check the
   allocator invariants established by json_lexer_init before the
   buffer is freed with them. */
static inline void
json_lexer_deinit (struct json_lexer *lexer)
{
  assert (lexer->allocator.allocate == lexer->str.allocator.allocate);
  assert (lexer->allocator.reallocate == lexer->str.allocator.reallocate);
  assert (lexer->allocator.deallocate == lexer->str.allocator.deallocate);
  json_lexer_stringbuf_deinit (&(lexer->str));
}
+
+static inline int json_lexer_handle_stringbuf_success (void *ctx, size_t size,
+ const char *str);
+static inline int json_lexer_handle_stringbuf_error (void *ctx);
+static inline int json_lexer_handle_number_success (void *ctx, double value,
+ char lookahead);
+static inline int json_lexer_handle_number_error (void *ctx);
+
+static inline void
+json_lexer_set (struct json_lexer *lexer,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool value),
+ int (*handle_number) (void *, double value),
+ int (*handle_string) (void *, size_t len, const char *str),
+ int (*handle_object_start) (void *),
+ int (*handle_object_assoc) (void *),
+ int (*handle_object_stop) (void *),
+ int (*handle_array_start) (void *),
+ int (*handle_array_stop) (void *),
+ int (*handle_next) (void *),
+ int (*handle_syntax_error) (void *),
+ /* Parsing a number stops when a
+ foreign character stops the
+ digit sequence. If the number
+ handler returns non-zero, the
+ foreign character is signalled
+ as a rejected input, so you can
+ resume parsing by pushing it. */
+ int (*handle_rejected_input) (void *, char c), void *context)
+{
+ lexer->state = JSON_LEXER_READING_WHITESPACE;
+ json_lexer_stringbuf_set (&(lexer->str),
+ json_lexer_handle_stringbuf_success,
+ json_lexer_handle_stringbuf_error, lexer);
+ json_lexer_number_set (&(lexer->nb),
+ json_lexer_handle_number_success,
+ json_lexer_handle_number_error, lexer);
+ lexer->handle_null = handle_null;
+ lexer->handle_bool = handle_bool;
+ lexer->handle_number = handle_number;
+ lexer->handle_string = handle_string;
+ lexer->handle_object_start = handle_object_start;
+ lexer->handle_object_assoc = handle_object_assoc;
+ lexer->handle_object_stop = handle_object_stop;
+ lexer->handle_array_start = handle_array_start;
+ lexer->handle_array_stop = handle_array_stop;
+ lexer->handle_next = handle_next;
+ lexer->handle_syntax_error = handle_syntax_error;
+ lexer->handle_rejected_input = handle_rejected_input;
+ lexer->context = context;
+ assert (lexer->allocator.allocate == lexer->str.allocator.allocate);
+ assert (lexer->allocator.reallocate == lexer->str.allocator.reallocate);
+ assert (lexer->allocator.deallocate == lexer->str.allocator.deallocate);
+}
+
/* Feed one input character C into LEXER.  Dispatches on the current
   state: between tokens, inside a string, inside a number, or in the
   middle of one of the keywords "null" / "false" / "true".
   Single-character tokens ({ } [ ] : ,) fire their callback
   immediately.  Returns 0, or whatever a triggered callback
   returns. */
static inline int
json_lexer_push (struct json_lexer *lexer, char c)
{
  assert (lexer->allocator.allocate == lexer->str.allocator.allocate);
  assert (lexer->allocator.reallocate == lexer->str.allocator.reallocate);
  assert (lexer->allocator.deallocate == lexer->str.allocator.deallocate);
  switch (lexer->state)
    {
    case JSON_LEXER_READING_WHITESPACE:
      /* Between tokens: decide what kind of token C starts. */
      switch (c)
	{
	case ' ':
	case '\t':
	case '\r':
	case '\n':
	  /* Stays that way. */
	  return 0;

	case '"':
	  /* Start reading a string. */
	  lexer->state = JSON_LEXER_READING_STRING;
	  return 0;

	case '-':
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  /* Start reading a number.  The first character is part of
	     the number, so forward it to the number sub-lexer. */
	  lexer->state = JSON_LEXER_READING_NUMBER;
	  if (json_lexer_number_push (&(lexer->nb), c) != 0)
	    {
	      /* Impossible, - or [0-9] always start a number. */
	      assert (0);
	    }
	  return 0;

	case 'n':
	  /* Start reading null */
	  lexer->state = JSON_LEXER_READING_NU;
	  return 0;

	case 'f':
	  /* Start reading false */
	  lexer->state = JSON_LEXER_READING_FA;
	  return 0;

	case 't':
	  /* Start reading true */
	  lexer->state = JSON_LEXER_READING_TR;
	  return 0;

	case '{':
	  return lexer->handle_object_start (lexer->context);

	case ':':
	  return lexer->handle_object_assoc (lexer->context);

	case '}':
	  return lexer->handle_object_stop (lexer->context);

	case '[':
	  return lexer->handle_array_start (lexer->context);

	case ']':
	  return lexer->handle_array_stop (lexer->context);

	case ',':
	  return lexer->handle_next (lexer->context);

	default:
	  return lexer->handle_syntax_error (lexer->context);
	}
      break;

    case JSON_LEXER_READING_STRING:
      /* The stringbuf callbacks restore the whitespace state when the
         closing quote (or an error) is seen. */
      return json_lexer_stringbuf_push (&(lexer->str), c);

    case JSON_LEXER_READING_NUMBER:
      /* Likewise, the number callbacks restore the whitespace
         state. */
      return json_lexer_number_push (&(lexer->nb), c);

      /* The remaining states accept the keywords "null", "false" and
         "true" letter by letter; any deviation resets to whitespace
         and reports a syntax error. */
    case JSON_LEXER_READING_NU:
      if (c == 'u')
	{
	  lexer->state = JSON_LEXER_READING_NUL;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_NUL:
      if (c == 'l')
	{
	  lexer->state = JSON_LEXER_READING_NULL;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_NULL:
      if (c == 'l')
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_null (lexer->context);
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_FA:
      if (c == 'a')
	{
	  lexer->state = JSON_LEXER_READING_FAL;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_FAL:
      if (c == 'l')
	{
	  lexer->state = JSON_LEXER_READING_FALS;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_FALS:
      if (c == 's')
	{
	  lexer->state = JSON_LEXER_READING_FALSE;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_FALSE:
      if (c == 'e')
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_bool (lexer->context, false);
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_TR:
      if (c == 'r')
	{
	  lexer->state = JSON_LEXER_READING_TRU;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_TRU:
      if (c == 'u')
	{
	  lexer->state = JSON_LEXER_READING_TRUE;
	  return 0;
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}

    case JSON_LEXER_READING_TRUE:
      if (c == 'e')
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_bool (lexer->context, true);
	}
      else
	{
	  lexer->state = JSON_LEXER_READING_WHITESPACE;
	  return lexer->handle_syntax_error (lexer->context);
	}
    default:
      assert (false);
    }
}
+
/* Signal end of input.  Between tokens this is a clean end; a number
   in progress is flushed (numbers are the only token whose end is
   only known from the following character); any other state means an
   unterminated string or keyword, which is a syntax error. */
static inline int
json_lexer_terminate (struct json_lexer *lexer)
{
  assert (lexer->allocator.allocate == lexer->str.allocator.allocate);
  assert (lexer->allocator.reallocate == lexer->str.allocator.reallocate);
  assert (lexer->allocator.deallocate == lexer->str.allocator.deallocate);
  switch (lexer->state)
    {
    case JSON_LEXER_READING_WHITESPACE:
      return 0;
    case JSON_LEXER_READING_NUMBER:
      return json_lexer_number_terminate (&(lexer->nb));
    default:
      return lexer->handle_syntax_error (lexer->context);
    }
}
+
+static inline int
+json_lexer_handle_stringbuf_success (void *ctx, size_t size, const char *str)
+{
+ struct json_lexer *lexer = ctx;
+ lexer->state = JSON_LEXER_READING_WHITESPACE;
+ return lexer->handle_string (lexer->context, size, str);
+}
+
+static inline int
+json_lexer_handle_stringbuf_error (void *ctx)
+{
+ struct json_lexer *lexer = ctx;
+ lexer->state = JSON_LEXER_READING_WHITESPACE;
+ return lexer->handle_syntax_error (lexer->context);
+}
+
+static inline int
+json_lexer_handle_number_success (void *ctx, double value, char lookahead)
+{
+ struct json_lexer *lexer = ctx;
+ lexer->state = JSON_LEXER_READING_WHITESPACE;
+ int error = lexer->handle_number (lexer->context, value);
+ if (error == 0)
+ {
+ return json_lexer_push (lexer, lookahead);
+ }
+ else
+ {
+ return lexer->handle_rejected_input (lexer->context, lookahead);
+ }
+}
+
+static inline int
+json_lexer_handle_number_error (void *ctx)
+{
+ struct json_lexer *lexer = ctx;
+ lexer->state = JSON_LEXER_READING_WHITESPACE;
+ return lexer->handle_syntax_error (lexer->context);
+}
+
+static inline int
+json_lexer_stringbuf_init (const struct memory_allocator *allocator,
+ struct json_lexer_stringbuf *str)
+{
+ str->max = 8;
+ copy_allocator ((struct memory_allocator *) (&(str->allocator)), allocator);
+ if (ALLOCATE (str->str, str->max) < 0)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+static inline void
+json_lexer_stringbuf_deinit (struct json_lexer_stringbuf *str)
+{
+ if (str != NULL)
+ {
+ const struct memory_allocator *allocator = &(str->allocator);
+ DEALLOCATE (str->str);
+ }
+}
+
+static inline void
+json_lexer_stringbuf_set (struct json_lexer_stringbuf *str,
+ int (*handle_success) (void *, size_t,
+ const char *),
+ int (*handle_error) (void *), void *context)
+{
+ str->size = 0;
+ str->escape_size = 0;
+ str->handle_success = handle_success;
+ str->handle_error = handle_error;
+ str->context = context;
+}
+
+static inline int json_lexer_stringbuf_push_char (size_t *max,
+ size_t *size,
+ char **str,
+ const struct
+ memory_allocator *allocator,
+ char c);
+
+static inline int
+json_lexer_stringbuf_push (struct json_lexer_stringbuf *str, char c)
+{
+ if (str->escape_size)
+ {
+ assert (str->escape_size < 6);
+ /* Parsing an escape sequence. */
+ str->escape_sequence[str->escape_size++] = c;
+ /* Try to reduce the escape sequence. */
+ switch (str->escape_sequence[1])
+ {
+ case '"':
+ case '\\':
+ case '/':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator),
+ str->escape_sequence[1]);
+ case 'b':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator), '\b');
+ case 'f':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator), '\f');
+ case 'n':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator), '\n');
+ case 'r':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator), '\r');
+ case 't':
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str),
+ &(str->allocator), '\t');
+ case 'u':
+ if (str->escape_size == 6)
+ {
+ uint16_t escape_sequence = 0;
+ static const uint16_t factors[] = { 4096, 256, 16, 0 };
+ for (size_t i = 0; i < 4; i++)
+ {
+ char c = str->escape_sequence[i + 2];
+ if (c >= '0' && c <= '9')
+ {
+ escape_sequence += factors[i] * (c - '0');
+ }
+ else if (c >= 'a' && c <= 'f')
+ {
+ escape_sequence += factors[i] * (c - 'a' + 10);
+ }
+ else if (c >= 'A' && c <= 'F')
+ {
+ escape_sequence += factors[i] * (c - 'A' + 10);
+ }
+ else
+ {
+ str->escape_size = 0;
+ return str->handle_error (str->context);
+ }
+ }
+ str->escape_size = 0;
+ return json_lexer_stringbuf_push_char (&(str->max),
+ &(str->size),
+ &(str->str),
+ &(str->allocator),
+ (uint8_t)
+ escape_sequence);
+ }
+ else
+ {
+ return 0;
+ }
+ default:
+ str->escape_size = 0;
+ return str->handle_error (str->context);
+ }
+ }
+ else if (c == '\\')
+ {
+ str->escape_size = 1;
+ str->escape_sequence[0] = c;
+ return 0;
+ }
+ else if (c == '"')
+ {
+ assert (str->escape_size == 0);
+ int error = str->handle_success (str->context, str->size, str->str);
+ str->size = 0;
+ return error;
+ }
+ else
+ {
+ /* What about control characters? */
+ return json_lexer_stringbuf_push_char (&(str->max), &(str->size),
+ &(str->str), &(str->allocator),
+ c);
+ }
+}
+
+static inline void
+json_lexer_number_set (struct json_lexer_number *nb,
+ int (*handle_success) (void *, double, char),
+ int (*handle_error) (void *), void *context)
+{
+ nb->step = LEXER_NUMBER_READ_SIGN;
+ nb->sign = 1;
+ nb->digits = 0;
+ nb->extra_exponent = 0;
+ nb->exponent_sign = 1;
+ nb->exponent = 0;
+ nb->handle_success = handle_success;
+ nb->handle_error = handle_error;
+ nb->context = context;
+}
+
+static inline int
+json_lexer_number_accept (struct json_lexer_number *nb, char lookahead)
+{
+ const double final_exponent =
+ (((double) (nb->exponent_sign) * (double) (nb->exponent))
+ + (double) (nb->extra_exponent));
+ const double factor = pow (10.0, final_exponent);
+ const double value = nb->sign * nb->digits * factor;
+ nb->sign = +1;
+ nb->digits = 0;
+ nb->extra_exponent = 0;
+ nb->exponent_sign = 1;
+ nb->exponent = 0;
+ nb->step = LEXER_NUMBER_READ_SIGN;
+ return nb->handle_success (nb->context, value, lookahead);
+}
+
/* Feed one character C into the number sub-lexer NB.  The number
   grammar is sign, integer digits, optional fraction, optional
   exponent; any character that fits none of those ends the number and
   triggers json_lexer_number_accept with C as the lookahead. */
static inline int
json_lexer_number_push (struct json_lexer_number *nb, char c)
{
  /* FIXME: it accepts 0....123 as 0.123, 0345 as 345, ----4 as -4,
     123eEeeE4 as 123e4, .4 as 0.4 */
  switch (nb->step)
    {
    case LEXER_NUMBER_READ_SIGN:
      /* Expecting the optional leading '-' or the first digit. */
      switch (c)
	{
	case '-':
	  nb->sign = -1;
	  nb->step = LEXER_NUMBER_READ_INTEGER;
	  return 0;
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  nb->digits *= 10;
	  nb->digits += (c - '0');
	  nb->step = LEXER_NUMBER_READ_INTEGER;
	  return 0;
	default:
	  /* A number cannot start with anything else. */
	  nb->step = LEXER_NUMBER_READ_SIGN;
	  return nb->handle_error (nb->context);
	}
      break;			/* unreachable */
    case LEXER_NUMBER_READ_INTEGER:
      /* Accumulating integer digits; '.' or e/E switch phase,
         anything else ends the number. */
      switch (c)
	{
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  nb->digits *= 10;
	  nb->digits += (c - '0');
	  return 0;
	case '.':
	  nb->step = LEXER_NUMBER_READ_FRACTIONAL;
	  return 0;
	case 'e':
	case 'E':
	  nb->step = LEXER_NUMBER_READ_EXPONENT_SIGN;
	  return 0;
	default:
	  return json_lexer_number_accept (nb, c);
	}
      break;			/* unreachable */
    case LEXER_NUMBER_READ_FRACTIONAL:
      /* Fractional digits keep accumulating into the same mantissa;
         each one shifts the implied decimal point left via
         extra_exponent. */
      switch (c)
	{
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  nb->digits *= 10;
	  nb->digits += (c - '0');
	  nb->extra_exponent -= 1;
	  return 0;
	case 'e':
	case 'E':
	  nb->step = LEXER_NUMBER_READ_EXPONENT_SIGN;
	  return 0;
	default:
	  return json_lexer_number_accept (nb, c);
	}
      break;			/* unreachable */
    case LEXER_NUMBER_READ_EXPONENT_SIGN:
      /* Expecting the optional '-' of the exponent or its first
         digit.  NOTE: an explicit '+' is not handled here and ends
         the number. */
      switch (c)
	{
	case '-':
	  nb->exponent_sign = -1;
	  nb->step = LEXER_NUMBER_READ_EXPONENT;
	  return 0;
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  nb->exponent *= 10;
	  nb->exponent += (c - '0');
	  nb->step = LEXER_NUMBER_READ_EXPONENT;
	  return 0;
	default:
	  return json_lexer_number_accept (nb, c);
	}
      break;			/* unreachable */
    case LEXER_NUMBER_READ_EXPONENT:
      /* Accumulating exponent digits. */
      switch (c)
	{
	case '0':
	case '1':
	case '2':
	case '3':
	case '4':
	case '5':
	case '6':
	case '7':
	case '8':
	case '9':
	  nb->exponent *= 10;
	  nb->exponent += (c - '0');
	  return 0;
	default:
	  return json_lexer_number_accept (nb, c);
	}
      break;			/* unreachable */
    default:
      assert (false);
    }
}
+
+static inline int
+json_lexer_number_terminate (struct json_lexer_number *nb)
+{
+ switch (nb->step)
+ {
+ case LEXER_NUMBER_READ_SIGN:
+ /* No number being parsed. */
+ return 0;
+ default:
+ /* FIXME: what if it’s "1.0E"? Accept as "1.0" for now */
+ return json_lexer_number_push (nb, '\0');
+ }
+}
+
/* Drive a whole lexing run: repeatedly PULL up to 4096 bytes into a
   scratch buffer and push them through a freshly initialized lexer,
   until PULL returns 0 (end of input) or negative, then terminate.
   Returns 0 on success, -1 if the lexer cannot be initialized, -2 if
   the scratch buffer cannot be allocated, or the first non-zero value
   returned by a callback. */
static inline int
json_lexer_lex (const struct memory_allocator *allocator,
		ssize_t (*pull) (void *, size_t request, void *data),
		void *pull_context,
		int (*handle_null) (void *),
		int (*handle_bool) (void *, bool value),
		int (*handle_number) (void *, double value),
		int (*handle_string) (void *, size_t len, const char *str),
		int (*handle_object_start) (void *),
		int (*handle_object_assoc) (void *),
		int (*handle_object_stop) (void *),
		int (*handle_array_start) (void *),
		int (*handle_array_stop) (void *),
		int (*handle_next) (void *),
		int (*handle_syntax_error) (void *),
		int (*handle_rejected_input) (void *, char c), void *context)
{
  struct json_lexer lexer;
  int error = 0;
  error = json_lexer_init (allocator, &lexer);
  if (error != 0)
    {
      /* Nothing to clean up yet. */
      goto ret;
    }
  static const size_t buffer_size = 4096;
  char *buffer = NULL;
  if (ALLOCATE (buffer, buffer_size) < 0)
    {
      error = -2;
      goto cleanup;
    }
  json_lexer_set (&lexer, handle_null, handle_bool, handle_number,
		  handle_string, handle_object_start,
		  handle_object_assoc, handle_object_stop,
		  handle_array_start, handle_array_stop,
		  handle_next, handle_syntax_error,
		  handle_rejected_input, context);
  ssize_t n_available;
  do
    {
      assert (lexer.allocator.allocate == lexer.str.allocator.allocate);
      assert (lexer.allocator.reallocate == lexer.str.allocator.reallocate);
      assert (lexer.allocator.deallocate == lexer.str.allocator.deallocate);
      n_available = pull (pull_context, buffer_size, buffer);
      if (n_available > 0)
	{
	  assert (n_available >= 0);
	  assert ((size_t) n_available <= buffer_size);
	  for (ssize_t i = 0; i < n_available; i++)
	    {
	      error = json_lexer_push (&lexer, buffer[i]);
	      if (error != 0)
		{
		  /* A callback aborted the run. */
		  goto cleanup;
		}
	    }
	}
    }
  while (n_available > 0);
  error = json_lexer_terminate (&lexer);
  if (error != 0)
    {
      goto cleanup;
    }
cleanup:
  assert (lexer.allocator.allocate == lexer.str.allocator.allocate);
  assert (lexer.allocator.reallocate == lexer.str.allocator.reallocate);
  assert (lexer.allocator.deallocate == lexer.str.allocator.deallocate);
  DEALLOCATE (buffer);
  json_lexer_deinit (&lexer);
ret:
  return error;
}
+
/* Cursor over an in-memory string, for feeding the lexer from a
   buffer instead of a stream. */
struct string_puller
{
  size_t size;			/* Bytes not yet consumed. */
  const char *str;		/* Next byte to consume. */
};

/* Pull callback for json_lexer_lex: copy up to REQUEST bytes from the
   string described by CTX into DATA, advancing the cursor.  Returns
   the number of bytes copied; 0 signals end of input. */
static inline ssize_t
json_lexer_pull_from_string (void *ctx, size_t request, void *data)
{
  struct string_puller *puller = ctx;
  ssize_t ret = 0;
  char *output = data;
  while (request != 0 && puller->size != 0)
    {
      *output = *puller->str;
      (puller->size)--;
      (puller->str)++;
      request--;
      output++;
      /* BUG FIX: the byte count was never incremented, so this
         callback always reported 0 bytes pulled and the lexer
         stopped before consuming any input. */
      ret++;
    }
  return ret;
}
+
/* Convenience wrapper: lex the N bytes of STR as one complete JSON
   input.  Same callbacks and return values as json_lexer_lex. */
static inline int
json_lexer_string (const struct memory_allocator *allocator,
		   size_t n,
		   const char *str,
		   int (*handle_null) (void *),
		   int (*handle_bool) (void *, bool value),
		   int (*handle_number) (void *, double value),
		   int (*handle_string) (void *, size_t len, const char *str),
		   int (*handle_object_start) (void *),
		   int (*handle_object_assoc) (void *),
		   int (*handle_object_stop) (void *),
		   int (*handle_array_start) (void *),
		   int (*handle_array_stop) (void *),
		   int (*handle_next) (void *),
		   int (*handle_syntax_error) (void *),
		   int (*handle_rejected_input) (void *, char c),
		   void *context)
{
  /* The puller lives on the stack: json_lexer_lex finishes before we
     return, so this is safe. */
  struct string_puller puller = {.size = n,.str = str };
  return json_lexer_lex (allocator, json_lexer_pull_from_string, &puller,
			 handle_null, handle_bool, handle_number,
			 handle_string, handle_object_start,
			 handle_object_assoc, handle_object_stop,
			 handle_array_start, handle_array_stop,
			 handle_next, handle_syntax_error,
			 handle_rejected_input, context);
}
+
/* Append C to the dynamically grown buffer *STR (capacity *MAX,
   length *SIZE), doubling the capacity when full.  Returns 0 on
   success, -1 if reallocation fails (the buffer is left intact). */
static inline int
json_lexer_stringbuf_push_char (size_t *max,
				size_t *size,
				char **str,
				const struct memory_allocator *allocator,
				char c)
{
  if (*size >= *max)
    {
      size_t grown = (*max != 0) ? (2 * *max) : 1;
      if (REALLOCATE (*str, grown) < 0)
	{
	  return -1;
	}
      *max = grown;
    }
  (*str)[*size] = c;
  *size += 1;
  return 0;
}
+
+#endif /* NEOAS_JSON_LEXER_H_INCLUDED */
diff --git a/src/libjson/neoas-json-parser.h b/src/libjson/neoas-json-parser.h
new file mode 100644
index 0000000..b586b52
--- /dev/null
+++ b/src/libjson/neoas-json-parser.h
@@ -0,0 +1,462 @@
+#ifndef NEOAS_JSON_PARSER_H_INCLUDED
+# define NEOAS_JSON_PARSER_H_INCLUDED
+
+# include <stdbool.h>
+
+struct memory_allocator;
+
+struct json_parser;
+
+static inline int json_parser_init (const struct memory_allocator *allocator,
+ struct json_parser *parser);
+
+static inline void json_parser_deinit (struct json_parser *parser);
+
+static inline void json_parser_set (struct json_parser *parser,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool),
+ int (*handle_number) (void *, double),
+ int (*handle_string) (void *, size_t,
+ const char *),
+ int (*handle_object_start) (void *,
+ void **),
+ int (*object_next_key) (void *, void *,
+ size_t,
+ const char *),
+ int (*handle_object_stop) (void *,
+ void *),
+ int (*handle_array_start) (void *,
+ void **),
+ int (*array_next_key) (void *, void *,
+ size_t),
+ int (*handle_array_stop) (void *, void *),
+ int (*handle_grammar_error) (void *),
+ void *context);
+
+static inline int json_parser_push_null (struct json_parser *parser);
+static inline int json_parser_push_bool (struct json_parser *parser,
+ bool boolean);
+static inline int json_parser_push_number (struct json_parser *parser,
+ double number);
+static inline int json_parser_push_string (struct json_parser *parser,
+ size_t length, const char *str);
+static inline int json_parser_push_object_start (struct json_parser *parser);
+static inline int json_parser_push_object_assoc (struct json_parser *parser);
+static inline int json_parser_push_object_stop (struct json_parser *parser);
+static inline int json_parser_push_array_start (struct json_parser *parser);
+static inline int json_parser_push_array_stop (struct json_parser *parser);
+static inline int json_parser_push_next (struct json_parser *parser);
+
# include <assert.h>
# include <stdlib.h>
# include <string.h>
+
/* Kind of container currently open on the parser stack. */
enum json_parser_frame_type
{
  JSON_PARSER_FRAME_OBJECT,
  JSON_PARSER_FRAME_ARRAY
};

/* Per-array bookkeeping: the index the next element will receive. */
struct json_parser_array_frame
{
  size_t next_index;
};

/* Per-object bookkeeping: whether a key has been bound with ':' so
   that the next value belongs to it. */
struct json_parser_object_frame
{
  bool has_key;
};

/* Container-specific part of a stack frame. */
union json_parser_frame_u
{
  struct json_parser_array_frame array;
  struct json_parser_object_frame object;
};

/* One open container: its kind, its bookkeeping, and the user data
   pointer produced by the matching start callback. */
struct json_parser_frame
{
  enum json_parser_frame_type type;
  union json_parser_frame_u u;
  void *user_data;
};
+
/* Push-based grammar checker layered on top of the lexer events.  It
   keeps a stack of open containers and a pending object key, and
   forwards validated events to the user's callbacks.  The *_start
   callbacks return a per-container user data pointer through their
   void ** argument; it is handed back to the matching next-key and
   stop callbacks. */
struct json_parser
{
  size_t max_stack;		/* Allocated frames. */
  size_t stack_top;		/* Frames in use. */
  struct json_parser_frame *stack;
  struct memory_allocator allocator;
  /* Copy of the last object key seen, owned by the parser, pending
     until its ':' arrives; NULL when no key is pending. */
  size_t next_key_size;
  char *next_key;
  int (*handle_null) (void *);
  int (*handle_bool) (void *, bool);
  int (*handle_number) (void *, double);
  int (*handle_string) (void *, size_t, const char *);
  int (*handle_object_start) (void *, void **);
  int (*object_next_key) (void *, void *, size_t, const char *);
  int (*handle_object_stop) (void *, void *);
  int (*handle_array_start) (void *, void **);
  int (*array_next_key) (void *, void *, size_t);
  int (*handle_array_stop) (void *, void *);
  int (*handle_grammar_error) (void *);
  void *context;
};
+
+static inline int
+json_parser_init (const struct memory_allocator *allocator,
+ struct json_parser *parser)
+{
+ parser->max_stack = 4;
+ parser->stack_top = 0;
+ if (ALLOCATE (parser->stack, parser->max_stack) < 0)
+ {
+ return -1;
+ }
+ copy_allocator (&(parser->allocator), allocator);
+ parser->next_key_size = 0;
+ parser->next_key = NULL;
+ parser->handle_null = NULL;
+ parser->handle_bool = NULL;
+ parser->handle_number = NULL;
+ parser->handle_string = NULL;
+ parser->handle_object_start = NULL;
+ parser->object_next_key = NULL;
+ parser->handle_object_stop = NULL;
+ parser->handle_array_start = NULL;
+ parser->array_next_key = NULL;
+ parser->handle_array_stop = NULL;
+ parser->handle_grammar_error = NULL;
+ parser->context = NULL;
+ return 0;
+}
+
/* Release PARSER's resources.  Any containers still open (input ended
   prematurely) have their stop callbacks invoked innermost-first, so
   the front-end gets a chance to free the user data it attached. */
static inline void
json_parser_deinit (struct json_parser *parser)
{
  /* DEALLOCATE expects a variable named "allocator" in scope. */
  const struct memory_allocator *allocator = &(parser->allocator);
  for (size_t i = parser->stack_top; i-- > 0;)
    {
      struct json_parser_frame *frame = &(parser->stack[i]);
      switch (frame->type)
	{
	case JSON_PARSER_FRAME_OBJECT:
	  /* Ignore failure, the front-end must liberate all memory. */
	  parser->handle_object_stop (parser->context, frame->user_data);
	  break;
	case JSON_PARSER_FRAME_ARRAY:
	  parser->handle_array_stop (parser->context, frame->user_data);
	  break;
	default:
	  assert (false);
	}
    }
  DEALLOCATE (parser->stack);
  DEALLOCATE (parser->next_key);
}
+
+static inline void
+json_parser_set (struct json_parser *parser,
+ int (*handle_null) (void *),
+ int (*handle_bool) (void *, bool),
+ int (*handle_number) (void *, double),
+ int (*handle_string) (void *, size_t, const char *),
+ int (*handle_object_start) (void *, void **),
+ int (*object_next_key) (void *, void *, size_t,
+ const char *),
+ int (*handle_object_stop) (void *, void *),
+ int (*handle_array_start) (void *, void **),
+ int (*array_next_key) (void *, void *, size_t),
+ int (*handle_array_stop) (void *, void *),
+ int (*handle_grammar_error) (void *), void *context)
+{
+ const struct memory_allocator *allocator = &(parser->allocator);
+ DEALLOCATE (parser->next_key);
+ parser->next_key_size = 0;
+ parser->next_key = NULL;
+ parser->handle_null = handle_null;
+ parser->handle_bool = handle_bool;
+ parser->handle_number = handle_number;
+ parser->handle_string = handle_string;
+ parser->handle_object_start = handle_object_start;
+ parser->object_next_key = object_next_key;
+ parser->handle_object_stop = handle_object_stop;
+ parser->handle_array_start = handle_array_start;
+ parser->array_next_key = array_next_key;
+ parser->handle_array_stop = handle_array_stop;
+ parser->handle_grammar_error = handle_grammar_error;
+ parser->context = context;
+}
+
+static inline int
+json_parser_push_null (struct json_parser *parser)
+{
+ if (parser->next_key != NULL)
+ {
+ /* No: we should be reading a colon now */
+ return parser->handle_grammar_error (parser->context);
+ }
+ if (parser->stack_top == 0)
+ {
+ return parser->handle_grammar_error (parser->context);
+ }
+ return parser->handle_null (parser->context);
+}
+
+static inline int
+json_parser_push_bool (struct json_parser *parser, bool value)
+{
+ if (parser->next_key != NULL)
+ {
+ /* No: we should be reading a colon now */
+ return parser->handle_grammar_error (parser->context);
+ }
+ if (parser->stack_top == 0)
+ {
+ return parser->handle_grammar_error (parser->context);
+ }
+ return parser->handle_bool (parser->context, value);
+}
+
+static inline int
+json_parser_push_number (struct json_parser *parser, double value)
+{
+ if (parser->next_key != NULL)
+ {
+ /* No: we should be reading a colon now */
+ return parser->handle_grammar_error (parser->context);
+ }
+ if (parser->stack_top == 0)
+ {
+ return parser->handle_grammar_error (parser->context);
+ }
+ return parser->handle_number (parser->context, value);
+}
+
/* The lexer saw a string literal.  It is either the key of the
   enclosing object (saved, NUL-terminated, in parser->next_key until
   the ':' arrives) or an ordinary string value.  KEY is only valid
   for the duration of the call; the parser takes its own copy.
   Returns 0, -1 on allocation failure, or a callback's result. */
static inline int
json_parser_push_string (struct json_parser *parser, size_t size,
			 const char *key)
{
  const struct memory_allocator *allocator = &(parser->allocator);
  /* There are 2 possibilities: either we’re reading an object key, or
     we are reading a value. */
  if (parser->stack_top == 0)
    {
      /* We’re not in an object, and this is a string: it cannot be a
         top-level value. */
      return parser->handle_grammar_error (parser->context);
    }
  struct json_parser_frame *top = &(parser->stack[parser->stack_top - 1]);
  if (top->type == JSON_PARSER_FRAME_ARRAY)
    {
      goto reading_value;
    }
  assert (top->type == JSON_PARSER_FRAME_OBJECT);
  struct json_parser_object_frame *frame = &(top->u.object);
  if (frame->has_key)
    {
      /* The key already got its ':', so this string is the value. */
      goto reading_value;
    }
  if (parser->next_key != NULL)
    {
      /* Two strings before the colon: invalid */
      return parser->handle_grammar_error (parser->context);
    }
  /* This string is the next key: keep a NUL-terminated private
     copy. */
  char *copy;
  if (ALLOCATE (copy, size + 1) < 0)
    {
      return -1;
    }
  memcpy (copy, key, size);
  copy[size] = '\0';
  parser->next_key_size = size;
  parser->next_key = copy;
  return 0;
reading_value:
  if (parser->next_key != NULL)
    {
      /* No: we should be reading a colon now */
      return parser->handle_grammar_error (parser->context);
    }
  return parser->handle_string (parser->context, size, key);
}
+
+static inline int
+extend_stack (struct json_parser *parser)
+{
+ const struct memory_allocator *allocator = &(parser->allocator);
+ size_t new_size = 2 * parser->max_stack;
+ if (REALLOCATE (parser->stack, new_size) < 0)
+ {
+ return -1;
+ }
+ parser->max_stack = new_size;
+ return 0;
+}
+
/* The lexer saw '{'.  Push a fresh object frame and ask the front-end
   (via handle_object_start) for the per-object user data.  The frame
   is pushed even if the callback fails (with NULL user data), so that
   json_parser_deinit still balances it with a stop callback. */
static inline int
json_parser_push_object_start (struct json_parser *parser)
{
  if (parser->next_key != NULL)
    {
      /* No: we should be reading a colon now */
      return parser->handle_grammar_error (parser->context);
    }
  if (parser->stack_top == parser->max_stack)
    {
      int error = extend_stack (parser);
      if (error)
	{
	  return error;
	}
    }
  parser->stack[parser->stack_top].type = JSON_PARSER_FRAME_OBJECT;
  parser->stack[parser->stack_top].u.object.has_key = false;
  int error = parser->handle_object_start (parser->context,
					   &(parser->
					     stack[parser->
						   stack_top].user_data));
  if (error)
    {
      parser->stack[parser->stack_top].user_data = NULL;
    }
  parser->stack_top++;
  return error;
}
+
/* The lexer saw ':'.  Bind the pending key to the enclosing object:
   deliver it through object_next_key, then free the parser's copy.
   Valid only when exactly one key is pending and the innermost open
   container is an object. */
static inline int
json_parser_push_object_assoc (struct json_parser *parser)
{
  const struct memory_allocator *allocator = &(parser->allocator);
  if (parser->next_key == NULL)
    {
      /* No: we don’t have a key! */
      return parser->handle_grammar_error (parser->context);
    }
  /* A key can only have been recorded inside an object (see
     json_parser_push_string), so the stack cannot be empty here. */
  assert (parser->stack_top > 0);
  struct json_parser_frame *frame = &(parser->stack[parser->stack_top - 1]);
  if (frame->type != JSON_PARSER_FRAME_OBJECT)
    {
      /* No: wtf, a ':' in an array? */
      return parser->handle_grammar_error (parser->context);
    }
  assert (frame->type == JSON_PARSER_FRAME_OBJECT);
  if (frame->u.object.has_key)
    {
      /* No: two colons for the same key */
      return parser->handle_grammar_error (parser->context);
    }
  frame->u.object.has_key = true;
  int error = parser->object_next_key (parser->context, frame->user_data,
				       parser->next_key_size,
				       parser->next_key);
  DEALLOCATE (parser->next_key);
  parser->next_key = NULL;
  return error;
}
+
+static inline int
+json_parser_push_object_stop (struct json_parser *parser)
+{
+ if (parser->next_key != NULL)
+ {
+ /* No: we should be reading a colon now */
+ return parser->handle_grammar_error (parser->context);
+ }
+ if (parser->stack_top == 0)
+ {
+ /* More data past the end of stream */
+ return parser->handle_grammar_error (parser->context);
+ }
+ parser->stack_top--;
+ return parser->handle_object_stop (parser->context,
+ parser->stack[parser->
+ stack_top].user_data);
+}
+
/* The lexer saw '['.  Push a fresh array frame, ask the front-end for
   its user data, and immediately announce index 0 through
   array_next_key (each subsequent ',' announces the next index).  As
   with objects, the frame is pushed even if the start callback fails,
   so json_parser_deinit balances it with a stop callback. */
static inline int
json_parser_push_array_start (struct json_parser *parser)
{
  if (parser->next_key != NULL)
    {
      /* No: we should be reading a colon now */
      return parser->handle_grammar_error (parser->context);
    }
  if (parser->stack_top == parser->max_stack)
    {
      int error = extend_stack (parser);
      if (error)
	{
	  return error;
	}
    }
  /* Taken after extend_stack: the stack may have been reallocated. */
  struct json_parser_frame *frame = &(parser->stack[parser->stack_top]);
  frame->type = JSON_PARSER_FRAME_ARRAY;
  frame->u.array.next_index = 0;
  int error = parser->handle_array_start (parser->context,
					  &(frame->user_data));
  if (error)
    {
      parser->stack[parser->stack_top].user_data = NULL;
    }
  parser->stack_top++;
  if (error == 0)
    {
      error =
	parser->array_next_key (parser->context, frame->user_data,
				(frame->u.array.next_index)++);
    }
  return error;
}
+
+static inline int
+json_parser_push_array_stop (struct json_parser *parser)
+{
+ if (parser->next_key != NULL)
+ {
+ /* No: we should be reading a colon now */
+ return parser->handle_grammar_error (parser->context);
+ }
+ if (parser->stack_top == 0)
+ {
+ /* More data past the end of stream */
+ return parser->handle_grammar_error (parser->context);
+ }
+ parser->stack_top--;
+ return parser->handle_array_stop (parser->context,
+ parser->stack[parser->
+ stack_top].user_data);
+}
+
/* The lexer saw ','.  Inside an array, announce the next element
   index through array_next_key; inside an object, expect a new key
   (clear has_key).  Grammar errors: a pending key without its ':', or
   a ',' at top level. */
static inline int
json_parser_push_next (struct json_parser *parser)
{
  if (parser->next_key != NULL)
    {
      /* wtf?? */
      return parser->handle_grammar_error (parser->context);
    }
  if (parser->stack_top == 0)
    {
      /* This cannot be a top-level element */
      return parser->handle_grammar_error (parser->context);
    }
  assert (parser->stack_top > 0);
  struct json_parser_frame *frame = &(parser->stack[parser->stack_top - 1]);
  if (frame->type == JSON_PARSER_FRAME_ARRAY)
    {
      return parser->array_next_key (parser->context, frame->user_data,
				     (frame->u.array.next_index)++);
    }
  else if (frame->type == JSON_PARSER_FRAME_OBJECT)
    {
      frame->u.object.has_key = false;
      return 0;
    }
  else
    {
      assert (false);
    }
  /* Safety net for NDEBUG builds, where the assert above vanishes. */
  abort ();
  return 0;
}
+
+#endif /* NEOAS_JSON_PARSER_H_INCLUDED */