diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/Makefile.am | 10 | ||||
-rw-r--r-- | src/Makefile.in | 604 | ||||
-rw-r--r-- | src/hashtable.c | 565 | ||||
-rw-r--r-- | src/hashtable.h | 143 | ||||
-rw-r--r-- | src/main.c | 183 | ||||
-rw-r--r-- | src/scriptbase.h | 77 | ||||
-rw-r--r-- | src/scriptbase_build.c | 540 | ||||
-rw-r--r-- | src/scriptbase_json_query.c | 213 | ||||
-rw-r--r-- | src/scriptbase_query.c | 278 | ||||
-rw-r--r-- | src/serve.c | 215 | ||||
-rw-r--r-- | src/string_buf.c | 308 | ||||
-rw-r--r-- | src/string_buf.h | 59 |
12 files changed, 3195 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am new file mode 100644 index 0000000..9d3da43 --- /dev/null +++ b/src/Makefile.am @@ -0,0 +1,10 @@ +bin_PROGRAMS = hydrilla +hydrilla_SOURCES = \ + hashtable.c \ + main.c \ + scriptbase_build.c \ + scriptbase_json_query.c \ + scriptbase_query.c \ + serve.c \ + string_buf.c +hydrilla_LDFLAGS = -lmicrohttpd -lcjson diff --git a/src/Makefile.in b/src/Makefile.in new file mode 100644 index 0000000..9837951 --- /dev/null +++ b/src/Makefile.in @@ -0,0 +1,604 @@ +# Makefile.in generated by automake 1.16.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2018 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +bin_PROGRAMS = hydrilla$(EXEEXT) +subdir = src +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am 
$(am__DIST_COMMON) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am_hydrilla_OBJECTS = hashtable.$(OBJEXT) main.$(OBJEXT) \ + scriptbase_build.$(OBJEXT) scriptbase_json_query.$(OBJEXT) \ + scriptbase_query.$(OBJEXT) serve.$(OBJEXT) \ + string_buf.$(OBJEXT) +hydrilla_OBJECTS = $(am_hydrilla_OBJECTS) +hydrilla_LDADD = $(LDADD) +hydrilla_LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(hydrilla_LDFLAGS) \ + $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/hashtable.Po ./$(DEPDIR)/main.Po \ + ./$(DEPDIR)/scriptbase_build.Po \ + ./$(DEPDIR)/scriptbase_json_query.Po \ + ./$(DEPDIR)/scriptbase_query.Po ./$(DEPDIR)/serve.Po \ + ./$(DEPDIR)/string_buf.Po +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +am__v_CC_1 = +CCLD = $(CC) +LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +am__v_CCLD_1 = +SOURCES = $(hydrilla_SOURCES) +DIST_SOURCES = $(hydrilla_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of 
newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/depcomp +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EXEEXT = @EXEEXT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = 
@abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build_alias = @build_alias@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host_alias = @host_alias@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +hydrilla_SOURCES = \ + hashtable.c \ + main.c \ + scriptbase_build.c \ + scriptbase_json_query.c \ + scriptbase_query.c \ + serve.c \ + string_buf.c + +hydrilla_LDFLAGS = -lmicrohttpd -lcjson +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 
'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) + +hydrilla$(EXEEXT): $(hydrilla_OBJECTS) $(hydrilla_DEPENDENCIES) $(EXTRA_hydrilla_DEPENDENCIES) + @rm -f hydrilla$(EXEEXT) + $(AM_V_CCLD)$(hydrilla_LINK) $(hydrilla_OBJECTS) $(hydrilla_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hashtable.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/main.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scriptbase_build.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scriptbase_json_query.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scriptbase_query.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/serve.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/string_buf.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) 
$(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/hashtable.Po + -rm -f ./$(DEPDIR)/main.Po + -rm -f ./$(DEPDIR)/scriptbase_build.Po + -rm -f ./$(DEPDIR)/scriptbase_json_query.Po + -rm -f ./$(DEPDIR)/scriptbase_query.Po + -rm -f ./$(DEPDIR)/serve.Po + -rm -f ./$(DEPDIR)/string_buf.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/hashtable.Po + -rm -f ./$(DEPDIR)/main.Po + -rm -f ./$(DEPDIR)/scriptbase_build.Po + -rm -f ./$(DEPDIR)/scriptbase_json_query.Po + -rm -f ./$(DEPDIR)/scriptbase_query.Po + -rm -f ./$(DEPDIR)/serve.Po + -rm -f ./$(DEPDIR)/string_buf.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-binPROGRAMS clean-generic cscopelist-am ctags ctags-am \ + distclean distclean-compile distclean-generic distclean-tags \ + distdir dvi dvi-am html html-am info info-am install \ + 
install-am install-binPROGRAMS install-data install-data-am \ + install-dvi install-dvi-am install-exec install-exec-am \ + install-html install-html-am install-info install-info-am \ + install-man install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-compile mostlyclean-generic pdf pdf-am \ + ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-binPROGRAMS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/hashtable.c b/src/hashtable.c new file mode 100644 index 0000000..a397879 --- /dev/null +++ b/src/hashtable.c @@ -0,0 +1,565 @@ +/** + * C hashtable implementation + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + */ + +/* + * GENERAL INFO + * + * You might want to read the beginning of hashtable.h first. + * + * In some places "rehashing" and in other ones "resizing" seemed to + * be the right word to use. They mean more or less the same. + * + * Functions starting with ht_ are part of the API. Internal functions + * are declared static. I also made some of them inline (either + * because they were extremely short or only called from 1 place). + * + * Hashtable size is always a power of 2. + * + * When the hashtable is ¾ full, a new, 2x bigger table is allocated + * and whenever one of 4 basic operations (adding, removing, setting, + * getting) occurs, 4 slots are being rehashed from old table into 8 + * slots in new table. Similarly, when hashtable is ¼ full, a new, + * 2x smaller table is allocated and each of subsequent operations + * rehashes 8 entries from old table into 4 in new table. + * This mechanism has been made lazier: getting and removing don't + * trigger growing of ht even if it's 3/4 full. 
Similarly, getting, + * setting and adding don't trigger shrinking. + * Once resizing is triggered, however, any of the operations will + * contribute to rehashing. Even if, for example, the operation is + * ADD and the table is being shrinked. + * This means, that if we have a hashtable of size n which is ¾ full + * and growing is triggered, then each subsequent call to + * ht_{add,rem,get,set}() rehashes some entries and, depending on + * how frequently and how successfully each of these 4 funcs was + * called, at the end of resizing we get a size 2n hashtable which is + * between ¼ and ½ full. Similarly, if shrinking of a ¼ full + * hashtable of size n is triggered, then after some operations we + * get a size ½n hashtable, that is somewhere between ¼ and ¾ full. + * One can see now, that we always keep the hashtable between ¼ and ¾ + * full (with the exception of a minimal size one, that can be empty). + */ + +#include "hashtable.h" + +#include <stdlib.h> +#include <string.h> +#include <stdio.h> +#include <stdbool.h> + +#ifdef _USE_INLINE +#define INLINE inline +#else +#define INLINE +#endif + +/* + * We won't shrink hashtable below this size. Newly created one will + * be this big. + */ +#define MIN_SIZE 4 + +/* Special value of ht->rehashing_position. */ +#define NOT_REHASHING ((ssize_t) -1) + +/* + * Those are possible return values of do_resizing_related_stuff() + * and rehash_some_entries() (which only returns the first 2). + */ +#define REHASHING_IN_PROGRESS 0 +#define REHASHING_NOT_IN_PROGRESS 1 +#define REHASHING_NO_MEM 2 + +/* Struct used to store a pair. 
*/ +struct ht_node +{ + const void *key; + const void *val; + struct ht_node *next; +}; + +enum op + { + GET, + GET_THREADSAFE, + ADD, + SET, + REM, + }; + +int ht_init(hashtable_t *ht, + size_t (*hash)(const void *key), + int (*cmp)(const void *key1, const void *key2)) +{ + if (!(ht->tab = calloc(MIN_SIZE, sizeof(struct ht_node**)))) + return HT_NO_MEM; + + ht->tab_size = MIN_SIZE; + ht->rehashing_position = NOT_REHASHING; + ht->entries = 0; + ht->hashfunc = hash; + ht->cmpfunc = cmp; + + return HT_OK; +} + +/* First come some utilities :) */ + +static INLINE size_t min(size_t n1, size_t n2) +{ + return n1 < n2 ? n1 : n2; +} + +static INLINE size_t hash2(size_t n) +{ + /* I found this "hash improver" on the internet. */ + n ^= (n >> 20) ^ (n >> 12); + return n ^ (n >> 7) ^ (n >> 4); +} + +/* Below are 2 list-handling utility functions. */ +static INLINE struct ht_node *join_lists(struct ht_node *l1, + struct ht_node *l2) +{ + if (!l1) return l2; + if (!l2) return l1; + + struct ht_node *l1_last; + + for (l1_last = l1; l1_last->next; l1_last = l1_last->next); + + /* Append l2 to the end of l1. */ + l1_last->next = l2; + + /* For convenience return the first element of the resulting list. */ + return l1; +} + + +static INLINE void push(struct ht_node *node, struct ht_node **list) +{ + node->next = *list; + *list = node; +} + +/* + * The following 2 rehash_* functions are helpers of rehash_some_entries(). + * This func rehashes 1 chain of entries in tab[] into 2 chains in newtab[]. + */ +static INLINE void rehash_position_growing(hashtable_t *ht) +{ + /* There are 2 possible new positions of an entry in a 2x bigger ht. 
*/ + struct ht_node *list0 = NULL, *list1 = NULL; + + size_t old_position = ht->rehashing_position, + new_position0 = old_position, + new_position1 = old_position | ht->tab_size; + + struct ht_node *pair = ht->tab[old_position], *next_pair; + + while (pair) + { + next_pair = pair->next; + + size_t new_position = hash2(ht->hashfunc(pair->key)) + & (ht->new_size - 1); + + push(pair, new_position == new_position1 ? &list1 : &list0); + + pair = next_pair; + } + + ht->newtab[new_position0] = list0; + ht->newtab[new_position1] = list1; + + ht->rehashing_position++; +} + +/* This func rehashes 2 chains of entries in tab[] into 1 chain in newtab[]. */ +static INLINE void rehash_2positions_shrinking(hashtable_t *ht) +{ + size_t new_position = ht->rehashing_position, + old_position0 = new_position, + old_position1 = new_position | ht->new_size; + + ht->newtab[new_position] = join_lists(ht->tab[old_position0], + ht->tab[old_position1]); + + ht->rehashing_position++; +} + +/* + * Rehashes 4(8) positions from tab to newtab. If those were the last + * enetries to rehash, the function takes care of everything + * (like deallocating old tab) and returns REHASHING_NOT_IN_PROGRESS. + * Otherwise, returns REHASHING_IN_PROGRESS. + * Caller must make sure rehashing was started b4 calling this func. 
+ */ +static int rehash_some_entries(hashtable_t *ht) +{ + int rehashes_left = 4; + + if (ht->new_size > ht->tab_size) /* growing ht */ + { + while(rehashes_left--) rehash_position_growing(ht); + if (ht->rehashing_position != ht->tab_size) + return REHASHING_IN_PROGRESS; + } + else /* shrinking ht */ + { + while(rehashes_left--) rehash_2positions_shrinking(ht); + if (ht->rehashing_position != ht->new_size) + return REHASHING_IN_PROGRESS; + } + + /* rehashing finishes */ + ht->rehashing_position = NOT_REHASHING; + ht->tab_size = ht->new_size; + free(ht->tab); + ht->tab = ht->newtab; + + return REHASHING_NOT_IN_PROGRESS; +} + +static INLINE bool resizing_taking_place(hashtable_t *ht) +{ + return !(ht->rehashing_position == NOT_REHASHING); +} + +void ht_finish_resizing(hashtable_t *ht) +{ + if (resizing_taking_place(ht)) + while (rehash_some_entries(ht) == REHASHING_IN_PROGRESS); +} + +static INLINE bool needs_growing(hashtable_t *ht) +{ + return ht->entries >= 3 * ht->tab_size / 4; +} + +static INLINE bool needs_shrinking(hashtable_t *ht) +{ + return ht->tab_size > MIN_SIZE + && ht->entries <= ht->tab_size / 4; +} +/* + * Each of hashtable operations (add, set, rem, get) should also + * attempt to do part of resizing. This way resizing operation + * which is O(n) is distributed among many hashtable accesses + * each of them still being O(1). Without this the the amortized + * complexity of ht accesses would still be O(1), but a single access + * would sometimes be O(n). + * Other function that adds, sets, gets or removes sth from ht uses + * this one to do this "part of resizing" mentioned above. + * This func returns REHASHING_NO_MEM on failed malloc (won't happen + * for GET or REM operation) and REHASHING_[NOT_]IN_PROGRESS otherwise. 
+ */ +static INLINE int +do_resizing_related_stuff(hashtable_t *ht, const void *key, enum op op) +{ + bool resizing = resizing_taking_place(ht); + + if (!resizing) + { + size_t new_size; + + switch (op) + { + case GET: + goto dont_start_resizing; + case ADD: + case SET: + if (needs_growing(ht)) + new_size = ht->tab_size * 2; + else + goto dont_start_resizing; + break; + default: /* case REM */ + if (needs_shrinking(ht)) + new_size = ht->tab_size / 2; + else + goto dont_start_resizing; + } + + struct ht_node **newtab; + if (!(newtab = malloc(new_size * sizeof(struct ht_node*)))) + return op == REM ? REHASHING_NOT_IN_PROGRESS : REHASHING_NO_MEM; + + ht->newtab = newtab; + ht->new_size = new_size; + ht->rehashing_position = 0; + + resizing = true; + } + + dont_start_resizing: + + return resizing ? + rehash_some_entries(ht) : REHASHING_NOT_IN_PROGRESS; +} + +/* + * This is a chaining hashtable, so each element in the array (table) + * is actually a list of entries. All operations (adding, removing, + * etc.) need to find the right list of entries (here called "bucket") + * for a given key first, so it makes sense to do it in a separate + * function. The bucket may be in tab or newtab if resizing is taking + * place. Being informed by the caller if resizing is in progress, + * this func does not need to check for it by itself. + */ +static INLINE struct ht_node **find_bucket(hashtable_t *ht, const void *key, + bool resizing_in_progress) +{ + size_t hash = hash2(ht->hashfunc(key)), + destination_tab_size, position; + + struct ht_node **destination_tab; + + if (resizing_in_progress) + /* + * Here we must check whether our key's bucket is still + * in ht->tab or already rehashed to ht->newtab. 
+ */ + { + size_t smaller_tab_size = min(ht->tab_size, ht->new_size), + smaller_tab_position = hash & (smaller_tab_size - 1); + + if (smaller_tab_position < ht->rehashing_position) + { + destination_tab = ht->newtab; + destination_tab_size = ht->new_size; + } + else + { + destination_tab = ht->tab; + destination_tab_size = ht->tab_size; + } + } + else + /* In this case we know, we're working on ht->tab and not newtab. */ + { + destination_tab = ht->tab; + destination_tab_size = ht->tab_size; + } + + position = hash & (destination_tab_size - 1); + return &destination_tab[position]; +} + +/* + * Operations of adding, removing, etc. all work on list of entries + * (bucket) to wchich key hashes and they have some common logic, so + * it made sense to make a single function, that does the right + * operation based on an enum passed to it. + */ +static INLINE int +perform_operation_on_bucket(hashtable_t *ht, struct ht_node **bucket, + const void *key, const void *val, + void **keyptr, void **valptr, + enum op op) +{ + struct ht_node **pairptr, *pair; + + for (pairptr = bucket, pair = *pairptr; + pair; + pairptr = &pair->next, pair = pair->next) + + if (!ht->cmpfunc(key, pair->key)) + { + if (op == ADD) + return HT_KEY_PRESENT; + + if (keyptr) *keyptr = (void*) pair->key; + if (valptr) *valptr = (void*) pair->val; + + switch (op) + { + case GET: + case GET_THREADSAFE: + { + return HT_OK; + } + case SET: + { + pair->key = key; + pair->val = val; + return HT_OK; + } + default: /* case REM */ + { + *pairptr = pair->next; + free(pair); + ht->entries--; + return HT_OK; + } + } + } + + if (op == GET || op == GET_THREADSAFE || op == REM) + return HT_KEY_ABSENT; + + /* op == ADD || op == SET */ + + struct ht_node *new_pair = malloc(sizeof(struct ht_node)); + if (!new_pair) + return HT_NO_MEM; + + *new_pair = (struct ht_node) {.key = key, .val = val}; + push(new_pair, bucket); + ht->entries++; + + return HT_OK; +} + +/* Generic function for performing of adding, removing, setting 
and getting. */ +static int perform_operation(hashtable_t *ht, const void *key, const void *val, + void **keyptr, void **valptr, enum op op) +{ + bool resizing_in_progress; + + if (op == GET_THREADSAFE) { + resizing_in_progress = resizing_taking_place(ht); + goto skip_resizing; + } + + switch (do_resizing_related_stuff(ht, key, op)) + { + case REHASHING_IN_PROGRESS: + resizing_in_progress = true; + break; + case REHASHING_NOT_IN_PROGRESS: + resizing_in_progress = false; + break; + default: /* case REHASHING_NO_MEM */ + return HT_NO_MEM; + } + + struct ht_node **bucket; + + skip_resizing: + bucket = find_bucket(ht, key, resizing_in_progress); + + return perform_operation_on_bucket(ht, bucket, key, val, + keyptr, valptr, op); +} + +/* The 5 functions below are the main part of the API. */ +int ht_get(hashtable_t *ht, const void *key, + void **storedkey, void **val) +{ + return perform_operation(ht, key, NULL, storedkey, val, GET); +} + +int ht_get_threadsafe(hashtable_t *ht, const void *key, + void **storedkey, void **val) +{ + return perform_operation(ht, key, NULL, storedkey, val, + GET_THREADSAFE); +} + +int ht_add(hashtable_t *ht, const void *key, const void *val) +{ + return perform_operation(ht, key, val, NULL, NULL, ADD); +} + +int ht_set(hashtable_t *ht, const void *key, const void *val, + void **oldkey, void **oldval) +{ + return perform_operation(ht, key, val, oldkey, oldval, SET); +} + +int ht_rem(hashtable_t *ht, const void *key, + void **storedkey, void **val) +{ + return perform_operation(ht, key, NULL, storedkey, val, REM); +} + +/* + * As mentioned in hashtable.h, this func does not deallocate keys + * nor vals. One could use ht_map_destroy() if that is needed. 
 */
void ht_destroy(hashtable_t *ht)
{
	ssize_t position;

	/*
	 * NOTE(review): if a shrink could still be in progress while
	 * entries == 0, taking this shortcut would leak ht->newtab
	 * (only ht->tab is freed below) -- confirm against the resizing
	 * logic whether that state is reachable.
	 */
	if (!ht->entries)
		goto free_tab;

	/* Collapse any in-progress rehash so only ht->tab holds entries. */
	ht_finish_resizing(ht);

	struct ht_node **tab = ht->tab;

	/* Free every chain node; keys/vals are the caller's to free. */
	for (position = ht->tab_size - 1; position >= 0; position--)
	{
		struct ht_node *pair = tab[position], *nextpair;

		while (pair)
		{
			nextpair = pair->next;
			free(pair);
			pair = nextpair;
		}
	}

 free_tab:
	free(ht->tab);
}

/*
 * Call mapfunc(key, val, arg) for every stored pair. Finishes any
 * in-progress resize first so a single table can be walked.
 */
void ht_map(hashtable_t *ht, void *arg,
	    void (*mapfunc)(const void *key, void *val, void *arg))
{
	ssize_t position;

	if (!ht->entries)
		return;

	ht_finish_resizing(ht);

	struct ht_node **tab = ht->tab, *pair;

	for (position = ht->tab_size - 1; position >= 0; position--)
	{
		for (pair = tab[position]; pair; pair = pair->next)
			mapfunc(pair->key, (void*) pair->val, arg);
	}
}

/*
 * Map over all pairs (mapfunc may free keys/vals) and then destroy the
 * table. This is the supported way to deallocate heap-allocated entries.
 */
void ht_map_destroy(hashtable_t *ht, void *arg,
		    void (*mapfunc)(void *key, void *val, void *arg))
{
	/*
	 * If mapfunc() deallocates keys, the following 2 lines make
	 * assumption on ht_destroy(), that it doesn't call ht->hashfunc()
	 * or ht->cmpfunc() on keys.
	 */
	ht_map(ht, arg, (void (*)(const void*, void*, void*)) mapfunc);
	ht_destroy(ht);
}

/*
 * These 2 functions are for easy making of hashtable with strings as
 * keys. Note that this hash is *not* secure against DoS attacks.
+ */ +size_t ht_string_hash(const char *key) +{ + size_t i = 0, hash = (size_t) 0xa1bad2dead3beef4; + + do + { + char shift = ((unsigned char) key[i]) % sizeof(size_t); + hash += ((hash >> shift) | (hash << (sizeof(size_t) - shift))) + ^ key[i]; + } + while (key[i++]); + + return hash; +} + +int ht_string_init(hashtable_t *ht) +{ + return ht_init(ht, (size_t (*)(const void*)) &ht_string_hash, + (int (*)(const void*, const void*)) &strcmp); +} diff --git a/src/hashtable.h b/src/hashtable.h new file mode 100644 index 0000000..7d85b33 --- /dev/null +++ b/src/hashtable.h @@ -0,0 +1,143 @@ +/** + * C hashtable implementation + * + * Copyright (C) 2018-2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + */ + +/* + * https://git.koszko.org/C-hashtable + * Note that this version is likely to be more up-to-date than the one linked. + * + * This is a separate chaining hashtable for general use. It's not + * universal: it uses malloc() and free(), so it requires a standard + * library to function and it's for single-threaded use only. It does, + * however, have one advantage: it rehashes automatically, both when + * it grows in size and when it shrinks, while retaining O(1) access + * time. A normal hashtable with rehashing would have amortized O(1) + * access time, but there would be single access with O(n) time + * complexity for each rehashing. In this hashtable, rehashing is done + * in parts. For example, a ht_add(), aside from adding an entry, + * might also rehash 4 other entries from old table to the new one and + * leave the rest unrehashed. + * Of course, it is assumed that a good hash function is provided + * by the programmer. If not, accesses may still degenerate to O(n). + * Hence, this hashtable is not secure against DoS attacks. + */ + +#ifndef HASHTABLE_H +#define HASHTABLE_H + +#include <sys/types.h> /* for ssize_t */ + +/* These are possible return values of some ht_ functions (see below). 
*/ +#define HT_OK 0 +#define HT_NO_MEM -1 +#define HT_KEY_PRESENT -2 +#define HT_KEY_ABSENT -3 + +typedef struct +{ + /* All members are considered implementation details, except for "entries", + * which can be read, but should not be modified by external code. + */ + size_t entries; + + /* + * tab[] is where entries (chains of entries) are stored. + * When rehashing, newtab[] is also used. + */ + struct ht_node **tab, **newtab; + + /* sizes of tab[] and newtab[], obviously */ + size_t tab_size, new_size; + + size_t (*hashfunc)(const void* key); + int (*cmpfunc)(const void* key1, const void *key2); + + /* + * When no rehashing is taking place, rehashing_position is -1 (#define'd as + * NOT_REHASHING in hashtable.c). At any other time, rehashing_position is the + * lowest not yet rehashed position in the smaller table. + */ + ssize_t rehashing_position; +} hashtable_t; + +/* + * All int functions return 0 (#define'd as HT_OK) on success and in + * case of failure they return error codes, as described below. + */ + +/* May fail with HT_NO_MEM. */ +int ht_init(hashtable_t *ht, + size_t (*hash)(const void* key), + int (*cmp)(const void* key1, const void *key2)); + +/* May fail with HT_NO_MEM and HT_KEY_PRESENT. */ +int ht_add(hashtable_t *ht, const void *key, const void *val); + +/* + * May fail with HT_NO_MEM. If key was not yet present in hashtable, *oldkey and + * *oldval are not modified. Otherwise, just-replaced pair is stored in them. + */ +int ht_set(hashtable_t *ht, const void *key, const void *val, + void **oldkey, void **oldval); + +/* + * If present, the looked for pair is stored in *storedkey and *val. Otherwise, + * they're not modified and HT_KEY_ABSENT is returned. storedkey and/or val can + * be NULL. + */ +int ht_get(hashtable_t *ht, const void *key, void **storedkey, void **val); + +/* + * Works like ht_get() but is thread-safe with regard to other calls to + * ht_get_threadsafe() on the same hashtable. 
Note that the hash and compare + * functions supplied to the hashtable also have to be thread-safe (that + * requirement is of course met for those used by ht_string_init()). + */ +int ht_get_threadsafe(hashtable_t *ht, const void *key, + void **storedkey, void **val); + +/* Works like the above but also removes the pair from ht if found. */ +int ht_rem(hashtable_t *ht, const void *key, void **storedkey, void **val); + +/* + * De-initializes the hashtable freeing all its structures. The programmer is + * responsible for freeing keys and values if they were allocated from the heap + * (see ht_map_destroy() below). + */ +void ht_destroy(hashtable_t *ht); + +/* Calls ht_finish_resizing(), then maps through ht. */ +void ht_map(hashtable_t *ht, void *arg, + void (*mapfunc)(const void *key, void *val, void *arg)); + +/* + * It might be tempting to use ht_map() to free() all keys and values stored in + * ht and then call ht_destroy(). If you think about it, ht_map() would leave + * hashtable in a broken state - with keys being deallocated. Depending on the + * implementation, ht_destroy() could cope with that, but we'd rather not + * guarrantee anything, so here's another function just for that - mapping + * through entries and destroying the hashtable immediately after, explicitly + * allowing the mapping function to deallocate keys. + */ +void ht_map_destroy(hashtable_t *ht, void *arg, + void (*mapfunc)(void *key, void *val, void *arg)); + +/* + * If hashtable is in the process of being rehashed, this function + * processes it to the end. Otherwise - it does nothing. + */ +void ht_finish_resizing(hashtable_t *ht); + +/* Included, since strings are commonly used as keys. */ +size_t ht_string_hash(const char *key); + +/* + * May fail with HT_NO_MEM. Initializes ht for use with string keys + * (using ht_string_hash() and strcmp()). 
+ */ +int ht_string_init(hashtable_t *ht); + +#endif /* HASHTABLE_H */ diff --git a/src/main.c b/src/main.c new file mode 100644 index 0000000..4902585 --- /dev/null +++ b/src/main.c @@ -0,0 +1,183 @@ +/** + * part of Hydrilla + * Program entry point. + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + */ + +#define _POSIX_C_SOURCE 200809L /* S_IFMT, S_IFDIR */ + +#include <dirent.h> +#include <errno.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <string.h> +#include <stdlib.h> + +#include <cjson/cJSON.h> + +#include "string_buf.h" +#include "hashtable.h" + +#include "scriptbase.h" + +#define PRESERVE_ERRNO(call) \ + do { \ + int VERY_UNLIKELY_TO_COLLIDE_NAME_$$$$$$$$##__LINE__ = errno; \ + call; \ + errno = VERY_UNLIKELY_TO_COLLIDE_NAME_$$$$$$$$##__LINE__; \ + } while (0) + +static const char default_search_path[] = "/var/lib/hydrilla/content/"; + +static int process_scriptbase_subdir(struct scriptbase *base, + struct dirent *subdir, + struct stringbuf *path_buf, + struct stringbuf *json_buf) +{ + struct stat statbuf; + cJSON *index_json; + int retval = -1; + size_t initial_len = path_buf->buf_filled, len_dirname; + + if (sb_string(path_buf, subdir->d_name)) + return -1; + + len_dirname = path_buf->buf_filled; + + if (stat(path_buf->buf, &statbuf)) + return -1; + + if ((statbuf.st_mode & S_IFMT) != S_IFDIR) + return 0; + + if (sb_string(path_buf, "/index.json")) + return -1; + + printf("Reading %s\n", path_buf->buf); + + stringbuf_truncate(json_buf, 0); + if (sb_filepath(json_buf, path_buf->buf)) + return -1; + + index_json = cJSON_Parse(json_buf->buf); + if (!index_json) { + fprintf(stderr, "Failed to parse json.\n"); + return -1; + } + + stringbuf_truncate(path_buf, len_dirname); + + retval = catalogue_component(path_buf->buf + initial_len, + index_json, base); + + PRESERVE_ERRNO(cJSON_Delete(index_json)); + + return retval; +} + +static int prepare_scriptbase_from_dir(const char *dir_path, + struct 
scriptbase *base) +{ + DIR *maindir = NULL; + struct dirent *subdir; + struct stringbuf path_buf; + struct stringbuf json_buf; + size_t base_path_len; + int retval = -1; + + printf("Searching %s\n", dir_path); + stringbuf_init(&path_buf); + stringbuf_init(&json_buf); + + if (scriptbase_init(base, "https://hydrilla.koszko.org/resources")) + goto end; + + maindir = opendir(dir_path); + if (!maindir) + goto end; + + if (sb_sprintf(&path_buf, "%s/", dir_path)) + goto end; + + base_path_len = path_buf.buf_filled; + + while (true) { + errno = 0; + subdir = readdir(maindir); + if (!subdir) { + if (!errno) + retval = 0; + break; + } + + if (!strcmp(subdir->d_name, "..") || + !strcmp(subdir->d_name, ".")) + continue; + + stringbuf_truncate(&path_buf, base_path_len); + + errno = 0; + if (process_scriptbase_subdir(base, subdir, + &path_buf, &json_buf)) { + fprintf(stderr, "Error processing subdirectory %s%s", + subdir->d_name, errno ? ": " : ".\n"); + if (errno) + perror(NULL); + } + } + +end: + if (errno) + perror(NULL); + stringbuf_destroy(&path_buf); + stringbuf_destroy(&json_buf); + if (maindir) + closedir(maindir); + if (retval) + scriptbase_destroy(base); + + printf("Search in %s %s.\n", dir_path, retval ? "failed" : "finished"); + + return retval; +} + +static void print_component_name(const void *key, void *val, void *arg) +{ + char type = (char) (size_t) arg; + bool unfilled; + + unfilled = + (type == 's' && !((struct script*) val)->filled) || + (type == 'b' && !((struct bag*) val)->filled); + + printf("%s%s\n", (const char*) key, + unfilled ? 
" (referenced only)" : ""); +} + +int serve_scriptbase(struct scriptbase *base, bool); + +int main(int argc, char *argv[]) +{ + struct scriptbase base; + const char *search_path = default_search_path; + + if (argc > 1) + search_path = argv[1]; + + if (prepare_scriptbase_from_dir(search_path, &base)) + return -1; + puts("## LOADED SCRIPTS:"); + ht_map(&base.scripts, (void*) 's', print_component_name); + puts("## LOADED BAGS:"); + ht_map(&base.bags, (void*) 'b', print_component_name); + puts("## LOADED PAGES:"); + ht_map(&base.pages, (void*) 'p', print_component_name); + + if (serve_scriptbase(&base, !!getenv("HYDRILLA_WAIT_SIGTERM"))) + fprintf(stderr, "Error serving scriptbase.\n"); + + scriptbase_destroy(&base); + return 0; +} diff --git a/src/scriptbase.h b/src/scriptbase.h new file mode 100644 index 0000000..f7694f7 --- /dev/null +++ b/src/scriptbase.h @@ -0,0 +1,77 @@ +/** + * part of Hydrilla + * Scriptbase struct and functions operating on it. + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. 
+ */ + +#ifndef SCRIPTBASE_H +#define SCRIPTBASE_H + +#include <stdbool.h> + +#include <cjson/cJSON.h> + +#include "hashtable.h" + +union component { + struct script *script; + struct bag *bag; + void *any; +}; + +struct component_ref { + struct component_ref *next; + union component component; + const char *component_type; +}; + +struct script { + char *name; + char *location; + char *sha256; + bool filled; +}; + +struct bag { + char *name; + struct component_ref *components, *last_component; + bool filled; +}; + +struct page { + char *pattern; + union component payload; + const char *payload_type; +}; + +struct scriptbase { + char *repo_url; + hashtable_t scripts; + hashtable_t bags; + hashtable_t pages; +}; + +int catalogue_component(const char *path, cJSON *index_json, + struct scriptbase *base); + +int scriptbase_init(struct scriptbase *base, const char *repo_url); + +void scriptbase_destroy(struct scriptbase *base); + +const struct script *get_script(const char *name, struct scriptbase *base); +const struct bag *get_bag(const char *name, struct scriptbase *base); +const struct page *get_pattern(const char *pattern, struct scriptbase *base); + +char *get_script_json(const char *name, struct scriptbase *base); +char *get_bag_json(const char *name, struct scriptbase *base); +char *get_pattern_json(const char *name, struct scriptbase *base); +char *get_page_query_json(const char *name, struct scriptbase *base); + +int init_url_lookup_regex(void); +void destroy_url_lookup_regex(void); +int lookup_url(const char *url, struct scriptbase *base, + int (*callback)(struct page*, void*), void *data); + +#endif /* SCRIPTBASE_H */ diff --git a/src/scriptbase_build.c b/src/scriptbase_build.c new file mode 100644 index 0000000..433e839 --- /dev/null +++ b/src/scriptbase_build.c @@ -0,0 +1,540 @@ +/** + * part of Hydrilla + * Routines for building scriptbase from resources and configuratons (stored in + * `/var/lib/hydrilla/content/' or another, user-specified location. 
+ * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + */ + +#define _POSIX_C_SOURCE 200809L /* strdup() */ + +#include <errno.h> +#include <stdio.h> +#include <string.h> +#include <stdlib.h> + +#include "hashtable.h" +#include "string_buf.h" + +#include "scriptbase.h" + +static const char scriptstr[] = "script", bagstr[] = "bag", pagestr[] = "page"; + + +static void *add_or_get(const char *component_name, char component_type, + struct scriptbase *base); + +static struct script *script_create(const char *name) +{ + struct script *script; + + script = malloc(sizeof(struct script)); + if (!script) + return NULL; + + script->name = strdup(name); + + if (!script->name) + goto free_script; + + script->location = NULL; + script->sha256 = NULL; + script->filled = false; + + return script; + +free_script: + free(script); + + return NULL; +} + +static int script_fill(struct script *script, const char *dir, + const char *filename, const char *sha256) +{ + char *location = NULL; + size_t location_len = 0, location_filled = 0; + + if (sb_raw_sprintf(&location, &location_len, &location_filled, + "%s/%s", dir, filename)) + goto free_location; + script->location = location; + + script->sha256 = strdup(sha256); + if (!script->sha256) + goto free_location; + + script->filled = true; + + return 0; + +free_location: + free(location); + script->location = NULL; + + return -1; +} + +static void script_free(struct script *script) +{ + if (!script) + return; + + free(script->name); + free(script->location); + free(script->sha256); + free(script); +} + +static struct bag *bag_create(const char *name) +{ + struct bag *bag; + + bag = malloc(sizeof(struct bag)); + if (!bag) + return NULL; + + bag->name = strdup(name); + + if (!bag->name) + goto free_bag; + + bag->components = NULL; + bag->last_component = NULL; + bag->filled = false; + + return bag; + +free_bag: + free(bag); + + return NULL; +} + +static int bag_add(struct bag *bag, const 
char *component_name, + const char *component_type, struct scriptbase *base) +{ + struct component_ref *new_ref; + + new_ref = malloc(sizeof(struct component_ref)); + if (!new_ref) + return -1; + + new_ref->component.any = + add_or_get(component_name, *component_type, base); + + if (!new_ref->component.any) + goto free_ref; + + new_ref->component_type = component_type; + new_ref->next = NULL; + + *(bag->components ? &bag->last_component->next : &bag->components) = + new_ref; + bag->last_component = new_ref; + bag->filled = true; + + return 0; + +free_ref: + free(new_ref); + + return -1; +} + +static void bag_free(struct bag *bag) +{ + struct component_ref *tmp1, *tmp2; + + if (!bag) + return; + + tmp1 = bag->components; + + while (tmp1) { + tmp2 = tmp1->next; + free(tmp1); + tmp1 = tmp2; + } + + free(bag->name); + free(bag); +} + +static struct page *page_create(const char *pattern, const char *payload_name, + const char *payload_type, + struct scriptbase *base) +{ + struct page *page; + + page = malloc(sizeof(struct page)); + if (!page) + return NULL; + + page->pattern = strdup(pattern); + if (!page->pattern) + goto free_page; + + if (payload_name) { + page->payload.any = + add_or_get(payload_name, *payload_type, base); + if (!page->payload.any) + goto free_pattern; + } else { + page->payload.any = NULL; + } + + page->payload_type = payload_type; + + return page; + +free_pattern: + free(page->pattern); + +free_page: + free(page); + + return NULL; +} + +static void page_free(struct page *page) +{ + if (!page) + return; + + free(page->pattern); + free(page); +} + +static void *add_or_get(const char *component_name, char component_type, + struct scriptbase *base) +{ + void *component; + hashtable_t *relevant_ht; + bool found = true; + + relevant_ht = component_type == *bagstr ? 
&base->bags : &base->scripts; + if (ht_get(relevant_ht, component_name, NULL, &component)) { + if (component_type == *bagstr) + component = bag_create(component_name); + else + component = script_create(component_name); + + found = false; + } + + if (!component) + return NULL; + + /* Name is at the same position in both struct bag and struct script. */ + if (!found) { + switch (ht_add(relevant_ht, ((struct bag*) component)->name, + component)) { + case HT_NO_MEM: + errno = ENOMEM; + case HT_KEY_PRESENT: + goto free_component; + } + } + + return component; + +free_component: + if (component_type == *bagstr) + bag_free(component); + else + script_free(component); + + return NULL; +} + +int scriptbase_init(struct scriptbase *base, const char *repo_url) +{ + base->repo_url = strdup(repo_url); + if (!base->repo_url) + goto end; + + if (ht_string_init(&base->scripts)) + goto free_url; + + if (ht_string_init(&base->bags)) + goto free_scripts; + + if (ht_string_init(&base->pages)) + goto free_bags; + + return 0; + +free_bags: + ht_destroy(&base->bags); + +free_scripts: + ht_destroy(&base->scripts); + +free_url: + free(base->repo_url); + +end: + errno = ENOMEM; + return -1; +} + +static void destroy_cb(void *key, void *val, void *arg) +{ + char type = *((const char*) arg); + + if (type == 's') + script_free(val); + else if (type == 'b') + bag_free(val); + else + page_free(val); +} + +void scriptbase_destroy(struct scriptbase *base) +{ + char keys[] = {'s', 'b', 'p'}; + + ht_map_destroy(&base->scripts, keys, destroy_cb); + ht_map_destroy(&base->bags, keys + 1, destroy_cb); + ht_map_destroy(&base->pages, keys + 2, destroy_cb); + + free(base->repo_url); +} + +static int catalogue_script(const char *path, cJSON *index_json, + struct scriptbase *base) +{ + const cJSON *name, *sha256, *location; + const char *bad = NULL; + struct script *script; + bool filling_existing = false; + + name = cJSON_GetObjectItemCaseSensitive(index_json, "name"); + sha256 = 
cJSON_GetObjectItemCaseSensitive(index_json, "sha256"); + location = cJSON_GetObjectItemCaseSensitive(index_json, "location"); + + if (!cJSON_IsString(name) || name->valuestring == NULL) + bad = "name"; + else if (!cJSON_IsString(sha256) || sha256->valuestring == NULL) + bad = "sha256"; + else if (!cJSON_IsString(location) || location->valuestring == NULL) + bad = "location"; + + if (bad) { + fprintf(stderr, "Missing or invalid field \"%s\".\n", bad); + errno = 0; + return -1; + } + + filling_existing = !ht_get(&base->scripts, name->valuestring, + NULL, (void**) &script); + + if (filling_existing) { + if (script->filled) { + fprintf(stderr, "Multiple occurences of script %s.\n", + name->valuestring); + errno = 0; + return -1; + } + } else { + script = script_create(name->valuestring); + if (!script) + return -1; + } + + if (script_fill(script, path, location->valuestring, + sha256->valuestring)) + goto free_script; + + if (!filling_existing && ht_add(&base->scripts, script->name, script)) { + errno = ENOMEM; + goto free_script; + } + + return 0; + +free_script: + script_free(script); + return -1; +} + +static int component_ref_from_json(const cJSON *component_cJSON, + const char **component_type, + const char **component_name) +{ + const cJSON *component_type_cJSON, *component_name_cJSON; + + if (!cJSON_IsArray(component_cJSON)) + return -1; + + component_type_cJSON = component_cJSON->child; + if (!component_type_cJSON) + return -1; + component_name_cJSON = component_type_cJSON->next; + if (!component_name_cJSON || component_name_cJSON->next) + return -1; + + if (!cJSON_IsString(component_type_cJSON) || + !cJSON_IsString(component_name_cJSON)) + return -1; + + if (!strcmp(scriptstr, component_type_cJSON->valuestring)) + *component_type = scriptstr; + else if (!strcmp(bagstr, component_type_cJSON->valuestring)) + *component_type = bagstr; + else + return -1; + + *component_name = component_name_cJSON->valuestring; + + return 0; +} + +static int catalogue_bag(const 
char *path, cJSON *index_json, + struct scriptbase *base) +{ + const cJSON *name, *components, *component; + const char *component_type, *component_name; + struct bag *bag; + bool filling_existing = false; + + name = cJSON_GetObjectItemCaseSensitive(index_json, "name"); + + if (!cJSON_IsString(name) || name->valuestring == NULL) { + fprintf(stderr, "Missing or invalid field \"name\".\n"); + errno = 0; + return -1; + } + + filling_existing = !ht_get(&base->bags, name->valuestring, + NULL, (void**) &bag); + if (filling_existing) { + if (bag->filled) { + fprintf(stderr, "Multiple occurences of bag %s.\n", + name->valuestring); + errno = 0; + return -1; + } + } else { + bag = bag_create(name->valuestring); + if (!bag) + return -1; + } + + bag->filled = true; + + components = cJSON_GetObjectItemCaseSensitive(index_json, "components"); + if (!components) + return 0; + if (!cJSON_IsArray(components)) + goto invalid_components; + + cJSON_ArrayForEach(component, components) { + if (component_ref_from_json(component, + &component_type, &component_name)) + goto invalid_components; + + /* + * component_type now points to a static buffer and + * component_name to cJSON-owned memory + */ + + if (bag_add(bag, component_name, component_type, base)) + goto free_bag; + } + + if (!filling_existing && ht_add(&base->bags, bag->name, bag)) { + errno = ENOMEM; + goto free_bag; + } + + return 0; + +invalid_components: + fprintf(stderr, "Invalid field \"components\""); + errno = 0; + +free_bag: + bag_free(bag); + return -1; +} + +static int catalogue_page(const char *path, cJSON *index_json, + struct scriptbase *base) +{ + const cJSON *pattern, *payload; + const char *payload_type = "", *payload_name = NULL; + struct page *page; + + pattern = cJSON_GetObjectItemCaseSensitive(index_json, "pattern"); + payload = cJSON_GetObjectItemCaseSensitive(index_json, "payload"); + + if (!cJSON_IsString(pattern) || pattern->valuestring == NULL) { + fprintf(stderr, "Missing or invalid field 
\"pattern\".\n"); + errno = 0; + return -1; + } + + if (!payload) + goto skip_payload; + + if (component_ref_from_json(payload, &payload_type, &payload_name)) + goto invalid_payload; + +skip_payload: + page = page_create(pattern->valuestring, payload_name, payload_type, + base); + if (!page) + goto free_page; + + switch (ht_add(&base->pages, page->pattern, page)) { + case 0: + return 0; + case HT_NO_MEM: + errno = ENOMEM; + break; + case HT_KEY_PRESENT: + fprintf(stderr, "Multiple occurences of pattern %s.\n", + page->pattern); + errno = 0; + } + +free_page: + page_free(page); + return -1; + +invalid_payload: + fprintf(stderr, "Invalid field \"payload\""); + errno = 0; + + return 0; +} + +int catalogue_component(const char *path, cJSON *index_json, + struct scriptbase *base) +{ + const cJSON *type; + + type = cJSON_GetObjectItemCaseSensitive(index_json, "type"); + if (!cJSON_IsString(type) || type->valuestring == NULL) + goto bad_type; + + if (!strcmp(type->valuestring, scriptstr)) + return catalogue_script(path, index_json, base); + else if (!strcmp(type->valuestring, bagstr)) + return catalogue_bag(path, index_json, base); + else if (!strcmp(type->valuestring, pagestr)) + return catalogue_page(path, index_json, base); + else + goto bad_type; + + return 0; + +bad_type: + fprintf(stderr, "Missing or invalid type."); + + errno = 0; + return -1; +} diff --git a/src/scriptbase_json_query.c b/src/scriptbase_json_query.c new file mode 100644 index 0000000..e413b3e --- /dev/null +++ b/src/scriptbase_json_query.c @@ -0,0 +1,213 @@ +/** + * part of Hydrilla + * Routines that perform queries on in-memory scriptbase and return results in + * the form of JSON strings. + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. 
+ */ + +#include <errno.h> + +#include "scriptbase.h" + +#define ADD_COMPONENT(object, key, construct, adder) \ + new = cJSON_Create##construct; \ + if (!new) \ + goto free_json; \ + adder(object, key, new) + +#define ADD_TO_OBJECT(object, key, construct) \ + ADD_COMPONENT(object, key, construct, cJSON_AddItemToObject) + +#define ADD_KEY(key, construct) ADD_TO_OBJECT(json, key, construct) + +#define ARRAY_ADDER(object, key, var) cJSON_AddItemToArray(object, var) + +#define ADD_TO_ARRAY(object, construct) \ + ADD_COMPONENT(object, dummy, construct, ARRAY_ADDER) + +char *get_script_json(const char *name, struct scriptbase *base) +{ + const struct script *script; + cJSON *json, *new; + char *printed = NULL; + + script = get_script(name, base); + if (!script || !script->filled) + return NULL; + + json = cJSON_CreateObject(); + if (!json) + goto free_json; + + ADD_KEY("name", String(script->name)); + ADD_KEY("location", String(script->location)); + ADD_KEY("sha256", String(script->sha256)); + + printed = cJSON_Print(json); + +free_json: + cJSON_Delete(json); + + if (!printed) + errno = ENOMEM; + + return printed; +} + +char *get_bag_json(const char *name, struct scriptbase *base) +{ + const struct bag *bag; + cJSON *json, *new, *components, *current_component; + struct component_ref *ref; + char type_prefix[] = "\0"; + char *printed = NULL; + + bag = get_bag(name, base); + if (!bag || !bag->filled) + return NULL; + + json = cJSON_CreateObject(); + if (!json) + goto free_json; + + ADD_KEY("name", String(bag->name)); + ADD_KEY("components", Array()); + components = new; + + for (ref = bag->components; ref; ref = ref->next) { + ADD_TO_ARRAY(components, Array()); + current_component = new; + type_prefix[0] = ref->component_type[0]; + ADD_TO_ARRAY(current_component, String(type_prefix)); + /* name is at the same offset in struct bag and struct script */ + ADD_TO_ARRAY(current_component, + String(ref->component.bag->name)); + } + + printed = cJSON_Print(json); + +free_json: + 
cJSON_Delete(json); + + if (!printed) + errno = ENOMEM; + + return printed; +} + +cJSON *page_to_cJSON(const struct page *page) +{ + cJSON *json, *new, *payload; + char type_prefix[] = "\0"; + + json = cJSON_CreateObject(); + if (!json) + goto free_json; + + ADD_KEY("pattern", String(page->pattern)); + if (!page->payload.any) + goto skip_payload; + ADD_KEY("payload", Array()); + payload = new; + type_prefix[0] = page->payload_type[0]; + ADD_TO_ARRAY(payload, String(type_prefix)); + /* name is at the same offset in struct bag and struct script */ + ADD_TO_ARRAY(payload, String(page->payload.bag->name)); + +skip_payload: + return json; + +free_json: + cJSON_Delete(json); + + errno = ENOMEM; + + return NULL; +} + +char *get_pattern_json(const char *pattern, struct scriptbase *base) +{ + const struct page *page; + cJSON *json; + char *printed = NULL; + + page = get_pattern(pattern, base); + if (!page) + return NULL; + + json = page_to_cJSON(page); + if (!json) + return NULL; + + printed = cJSON_Print(json); + cJSON_Delete(json); + + if (!printed) + errno = ENOMEM; + + return printed; +} + +struct page_array_building { + cJSON *page_array; + bool OOM_error; +}; + +int lookup_callback(struct page *page, void *data) +{ + struct page_array_building *building = data; + cJSON *page_json; + + page_json = page_to_cJSON(page); + if (!page_json) { + building->OOM_error = true; + return -1; + } + + cJSON_AddItemToArray(building->page_array, page_json); + + return 0; +} + +#include <stdio.h> +char *get_page_query_json(const char *url, struct scriptbase *base) +{ + struct page_array_building building = {NULL, false}; + char *printed = NULL; + int result = -2; + + building.page_array = cJSON_CreateArray(); + if (!building.page_array) + goto free_json; + + result = lookup_url(url, base, lookup_callback, &building); + + printf("lookup returned value is %d\n", result); + + if (building.OOM_error) + result = -2; + if (result < 0) + goto free_json; + + printed = 
cJSON_Print(building.page_array); + if (!printed) + result = -2; + +free_json: + cJSON_Delete(building.page_array); + + switch (result) { + case 0: + break; + case -1: + case -3: + errno = EINVAL; + break; + case -2: + errno = ENOMEM; + } + + return printed; +} diff --git a/src/scriptbase_query.c b/src/scriptbase_query.c new file mode 100644 index 0000000..fe9a910 --- /dev/null +++ b/src/scriptbase_query.c @@ -0,0 +1,278 @@ +/** + * part of Hydrilla + * Routines for querying in-memory scriptbase, operating on data structures from + * `scripbase.h'. + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + */ + +#include <stddef.h> +#include <regex.h> +#include <stdbool.h> +#include <string.h> + +#include "hashtable.h" +#include "string_buf.h" + +#include "scriptbase.h" + +const struct script *get_script(const char *name, struct scriptbase *base) +{ + void *val; + + if (ht_get_threadsafe(&base->scripts, name, NULL, &val)) + return NULL; + + return ((struct script*) val)->filled ? val : NULL; +} + +const struct bag *get_bag(const char *name, struct scriptbase *base) +{ + void *val; + + if (ht_get_threadsafe(&base->bags, name, NULL, &val)) + return NULL; + + return ((struct bag*) val)->filled ? val : NULL; +} + +const struct page *get_pattern(const char *pattern, struct scriptbase *base) +{ + void *val = NULL; + + ht_get_threadsafe(&base->pages, pattern, NULL, &val); + + return val; +} + +static const char url_regex[] = + "^" + "([a-zA-Z]{1,20}://)" /* protocol */ + "([^/?#]{1,253})" /* domain */ + "(/[^?#]*)?" 
/* path */ + "\\\\?[^#]*" /* query */ + "#?.*" /* target */ + "$"; + +static regex_t url_regex_comp; +static bool url_regex_ready; + +int init_url_lookup_regex(void) +{ + int retval; + + retval = regcomp(&url_regex_comp, url_regex, REG_EXTENDED); + + url_regex_ready = !retval; + + return retval; +} + +void destroy_url_lookup_regex(void) +{ + if (!url_regex_ready) { + fprintf(stderr, "Attempt to destroy uninitialized regex in " __FILE__ "\n"); + return; + } + + regfree(&url_regex_comp); +} + +#define URL_REGEX_NMATCH 4 + +#define PROTOCOL_MATCH 1 +#define DOMAIN_MATCH 2 +#define PATH_MATCH 3 + +static int lookup_url_path(const char *path_begin, const char *path_end, + struct stringbuf *buf, struct scriptbase *base, + int (*callback)(struct page*, void*), void *data) +{ + const char *segment_end = path_begin; + int segments_dropped = 0; + int initial_len = buf->buf_filled; + size_t len_path, previous_segment; + void *val; + bool trailing_dash = path_end != path_begin && path_end[-1] == '/'; + char asterisks[] = "/***"; + int trailing_asterisks = 0, i; + int result; + + while (true) { + do { + if (path_begin >= path_end) + goto after_path_normalization; + } while (*(path_begin++) == '/'); + path_begin -= 2; + + segment_end = path_begin + 1; + while (*segment_end != '/' && ++segment_end < path_end); + + if (sb_bytes(buf, path_begin, segment_end - path_begin)) + return -2; + + path_begin = segment_end; + } + +after_path_normalization: +#define TRY_WILDCARD(condition, wildcard) \ + if (condition) { \ + stringbuf_truncate(buf, len_path); \ + if (sb_string(buf, wildcard)) \ + return -2; \ + \ + result = ht_get_threadsafe(&base->pages, buf->buf, \ + NULL, &val); \ + if (!result && callback(val, data)) \ + return 1; \ + } + + while (true) { + len_path = buf->buf_filled; + previous_segment = len_path; + while (previous_segment > initial_len && + buf->buf[--previous_segment] != '/'); + + if (!trailing_asterisks) {/* only on first iteration */ + trailing_asterisks = -1; + + for 
(i = 3; i > 0; i--) { + asterisks[i + 1] = '\0'; + + if (strncmp(buf->buf + previous_segment, + asterisks, i + 1)) + continue; + + trailing_asterisks = i; + + if (i != 3) + break; + + if (buf->buf[previous_segment + i + 1] == '*') + trailing_asterisks = -1; + + break; + } + } + + TRY_WILDCARD(segments_dropped == 0, ""); + TRY_WILDCARD(segments_dropped == 0 && trailing_dash, "/"); + TRY_WILDCARD(segments_dropped == 1 && trailing_asterisks != 1, + "/*"); + TRY_WILDCARD(segments_dropped > 1, "/**"); + TRY_WILDCARD(segments_dropped > 0 && + (segments_dropped > 1 || trailing_asterisks != 3), + "/***"); + + stringbuf_truncate(buf, previous_segment); + + if (previous_segment == len_path) + return 0; + + /* + * We only ever care if this count is 0, 1 or > 1, + * hence size_t is not necessary. + */ + if (segments_dropped < 2) + segments_dropped++; + } + +#undef TRY_WILDCARD +} + +static int lookup_url_domain(const char *domain_begin, const char *domain_end, + const char *path_begin, const char *path_end, + struct stringbuf *buf, struct scriptbase *base, + int (*callback)(struct page*, void*), void *data) +{ + const char *next_label = domain_begin; + int labels_dropped = 0; + int initial_len = buf->buf_filled; + int result; + +#define TRY_WILDCARD(condition, wildcard) \ + if (condition) { \ + stringbuf_truncate(buf, initial_len); \ + if (sb_string(buf, wildcard) || \ + sb_bytes(buf, domain_begin, domain_end - domain_begin)) \ + return -2; \ + \ + result = lookup_url_path(path_begin, path_end, \ + buf, base, callback, data); \ + if (result) \ + return result; \ + } + + while (true) { + domain_begin = next_label; + + while (*(next_label++) != '.') { + if (next_label >= domain_end) + return 0; + } + + TRY_WILDCARD(labels_dropped == 0, ""); + TRY_WILDCARD(labels_dropped == 1, "*."); + TRY_WILDCARD(labels_dropped > 0, "**."); + TRY_WILDCARD(true, "***."); + + labels_dropped++; + } + +#undef TRY_WILDCARD +} + +static int lookup_url_proto(const char *proto_begin, const char 
*proto_end, + const char *domain_begin, const char *domain_end, + const char *path_begin, const char *path_end, + struct stringbuf *buf, struct scriptbase *base, + int (*callback)(struct page*, void*), void *data) +{ + if (sb_bytes(buf, proto_begin, proto_end - proto_begin)) + return -2; + + return lookup_url_domain(domain_begin, domain_end, path_begin, path_end, + buf, base, callback, data); +} + +int lookup_url(const char *url, struct scriptbase *base, + int (*callback)(struct page*, void*), void *data) +{ + regmatch_t reg_matched[URL_REGEX_NMATCH]; + struct stringbuf buf; + const char *path_begin, *path_end; + int retval; + + if (!url_regex_ready) { + fprintf(stderr, "Regex not initialized in " __FILE__ "\n"); + return -3; + } + + printf("matching: %s\n", url); + + if (regexec(&url_regex_comp, url, + URL_REGEX_NMATCH, reg_matched, 0) || + reg_matched[DOMAIN_MATCH].rm_so == -1) + return -1; + + stringbuf_init(&buf); + + path_begin = url + reg_matched[PATH_MATCH].rm_so; + path_end = url + reg_matched[PATH_MATCH].rm_eo; + if (path_begin == url - 1) { + path_begin = NULL; + path_end = NULL; + } + + retval = lookup_url_proto(url + reg_matched[PROTOCOL_MATCH].rm_so, + url + reg_matched[PROTOCOL_MATCH].rm_eo, + url + reg_matched[DOMAIN_MATCH].rm_so, + url + reg_matched[DOMAIN_MATCH].rm_eo, + path_begin, path_end, + &buf, base, callback, data); + + stringbuf_destroy(&buf); + + return retval; +} diff --git a/src/serve.c b/src/serve.c new file mode 100644 index 0000000..8ef0ce3 --- /dev/null +++ b/src/serve.c @@ -0,0 +1,215 @@ +/** + * part of Hydrilla + * Serving data queries over HTTP using libmicrohttpd. + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. + * + * This file is based on Christian Grothoff's public comain code from + * `doc/examples/logging.c' in libmicrohttpd source tree. 
+ */ + +#define _POSIX_C_SOURCE 199506L /* sigwait() */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <signal.h> + +#include <sys/types.h> +#include <sys/select.h> +#include <sys/socket.h> + +#include <microhttpd.h> + +#include "scriptbase.h" + +#define PORT 10111 + +#define ARRLEN(array) (sizeof(array) / sizeof(*array)) + +static struct MHD_Response *default_response, *error_response, + *not_found_response; + +static struct { + struct MHD_Response **response; + const char *const text; +} static_responses[] = { + {&default_response, "<html><body>Nothing to see here</body></html>"}, + {&error_response, "<html><body>Error occured</body></html>"}, + {¬_found_response, "<html><body>Not found</body></html>"} +}; + +static int add_CORS_header(struct MHD_Response *response) +{ + return -1 * (MHD_add_response_header(response, + "Access-Control-Allow-Origin", + "*") == MHD_NO); +} + +typedef char *(*request_handler_t)(const char *queried, + struct scriptbase *base); + +static struct { + const char *path; + request_handler_t request_handler; +} resources[] = { + {"/script", get_script_json}, + {"/bag", get_bag_json}, + {"/pattern", get_pattern_json}, + {"/query", get_page_query_json} +}; + +struct request_handling { + struct scriptbase *base; + request_handler_t handler; + char *json_response; + int error_number; +}; + +static int handle_query_argument(void *cls, enum MHD_ValueKind kind, + const char *key, const char *value) +{ + struct request_handling *handling = cls; + + if (strcmp(key, "n")) { + fprintf(stderr, "Unknown argument: \"%s\" = \"%s\"\n", + key, value); + return MHD_YES; + } + + errno = 0; + + handling->json_response = handling->handler(value, handling->base); + + handling->error_number = errno; + + return MHD_NO; +} + +static int answer_with(struct MHD_Connection *connection, + request_handler_t handler, struct scriptbase *base) +{ + struct request_handling handling = {base, handler, NULL, EINVAL}; + struct 
MHD_Response *response; + int retval; + + MHD_get_connection_values(connection, MHD_GET_ARGUMENT_KIND, + handle_query_argument, &handling); + + if (!handling.json_response) { + if (handling.error_number) + goto send_error; + goto send_not_found; + } + + response = MHD_create_response_from_buffer + (strlen(handling.json_response), handling.json_response, + MHD_RESPMEM_MUST_FREE); + if (!response || add_CORS_header(response)) + goto send_error; + + retval = MHD_queue_response(connection, MHD_HTTP_OK, response); + MHD_destroy_response(response); + + return retval; + +send_not_found: + return MHD_queue_response(connection, MHD_HTTP_NOT_FOUND, + not_found_response); + +send_error: + free(handling.json_response); + return MHD_queue_response(connection, MHD_HTTP_INTERNAL_SERVER_ERROR, + error_response); +} + +static int answer(void *data, + struct MHD_Connection *connection, + const char *url, + const char *method, + const char *version, + const char *upload_data, + size_t *upload_data_size, + void **ptr) +{ + static int aptr; + int i; + struct scriptbase *base = data; + + printf("New %s request for %s using version %s\n", method, url, version); + + if (strcmp(method, "GET")) + return MHD_NO; + + if (&aptr != *ptr) { + /* do never respond on first call */ + *ptr = &aptr; + return MHD_YES; + } + *ptr = NULL; /* reset when done */ + + for (i = 0; i < ARRLEN(resources); i++) { + if (strcmp(resources[i].path, url)) + continue; + + return answer_with(connection, resources[i].request_handler, + base); + } + + return MHD_queue_response(connection, MHD_HTTP_OK, default_response); +} + +int getchar(void); + +int serve_scriptbase(struct scriptbase *base, bool wait_for_sigterm) +{ + int i; + struct MHD_Daemon *daemon; + sigset_t sigterm_set; + int signal_number; /* only written, not read */ + int retval = -1; + + if (init_url_lookup_regex()) + return -1; + + for (i = 0; i < ARRLEN(static_responses); i++) { + *static_responses[i].response = MHD_create_response_from_buffer + 
(strlen(static_responses[i].text), + (char*) static_responses[i].text, + MHD_RESPMEM_PERSISTENT); + + if (!*static_responses[i].response || + add_CORS_header(*static_responses[i].response)) + goto free_resources; + } + + daemon = MHD_start_daemon(MHD_USE_INTERNAL_POLLING_THREAD, PORT, NULL, + NULL, &answer, (void*) base, MHD_OPTION_END); + if (!daemon) + goto free_resources; + + if (wait_for_sigterm) { + sigemptyset(&sigterm_set); + sigaddset(&sigterm_set, SIGTERM); + sigwait(&sigterm_set, &signal_number); + } else { + getchar(); + } + + MHD_stop_daemon(daemon); + + retval = 0; + +free_resources: + for (i = 0; i < ARRLEN(static_responses); i++) { + if (*static_responses[i].response) + MHD_destroy_response(*static_responses[i].response); + *static_responses[i].response = NULL; + } + + destroy_url_lookup_regex(); + + return retval; +} diff --git a/src/string_buf.c b/src/string_buf.c new file mode 100644 index 0000000..332d023 --- /dev/null +++ b/src/string_buf.c @@ -0,0 +1,308 @@ +/** + * C string buffers for easy construction of complex strings + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. 
+ */ + +#include <string.h> +#include <stdlib.h> +#include <stdbool.h> +#include <stdarg.h> +#include <unistd.h> + +#define STRING_BUF_C +#include "string_buf.h" + +#define MINIMUM_EXTEND_BYTES 63 + +void stringbuf_init(struct stringbuf *sb) +{ + sb->buf = NULL; + sb->buf_len = 0; + sb->buf_filled = 0; +} + +void stringbuf_destroy(struct stringbuf *sb) +{ + free(sb->buf); +} + +void stringbuf_truncate(struct stringbuf *sb, size_t len) +{ + if (sb->buf_len <= len) + return; + + sb->buf_filled = len; + sb->buf[len] = '\0'; +} + +#define _UNPACKED_ARGS &sb->buf, &sb->buf_len, &sb->buf_filled + +int extend_buf(struct stringbuf *sb, size_t extend_len) +{ + return extend_buf_raw(_UNPACKED_ARGS, extend_len); +} + +int extend_buf_raw(_RAW_BUF_ARGS, size_t extend_len) +{ + ssize_t space_left = *buf_len - *buf_filled - 1; + size_t new_size, size_required, size_more_space; + char *new_buf; + + if (space_left >= 0 && space_left >= extend_len) + return 0; + + size_required = *buf_filled + extend_len + 1; + size_more_space = size_required + MINIMUM_EXTEND_BYTES; + new_size = *buf_len * 2; + if (new_size < size_more_space) + new_size = size_more_space; + + new_buf = realloc(*buf, new_size); + if (!new_buf) { + new_size = size_required; + new_buf = realloc(*buf, new_size); + } + if (!new_buf) + return -1; + + *buf = new_buf; + *buf_len = new_size; + return 0; +} + +_SB_HEAD(bytes, const void *bytes, size_t bytes_len) +{ + return sb_raw_bytes(_UNPACKED_ARGS, bytes, bytes_len); +} + +_SB_RAW_HEAD(bytes, const void *bytes, size_t bytes_len) +{ + if (extend_buf_raw(buf, buf_len, buf_filled, bytes_len)) + return -1; + + memcpy(*buf + *buf_filled, bytes, bytes_len); + (*buf)[*buf_filled + bytes_len] = '\0'; + *buf_filled += bytes_len; + + return 0; +} + +_SB_HEAD(string, const char *string) +{ + return sb_raw_string(_UNPACKED_ARGS, string); +} + +_SB_RAW_HEAD(string, const char *string) +{ + size_t string_len = strlen(string); + + return sb_raw_bytes(buf, buf_len, buf_filled, string, 
string_len); +} + +_SB_HEAD(char, char c) +{ + return sb_raw_char(_UNPACKED_ARGS, c); +} + +_SB_RAW_HEAD(char, char c) +{ + return sb_raw_bytes(buf, buf_len, buf_filled, &c, 1); +} + +_SB_HEAD(long, long num) +{ + return sb_raw_long(_UNPACKED_ARGS, num); +} + +_SB_RAW_HEAD(long, long num) +{ + unsigned char repr[3 * sizeof(long) + 1]; + int i; + bool neg = num < 0; + + for (i = sizeof(repr); num; num /= 10) + repr[--i] = '0' + num % 10; + + if (i == sizeof(repr)) + repr[--i] = '0'; + else if (neg) + repr[--i] = '-'; + + sb_raw_bytes(buf, buf_len, buf_filled, + repr + i, sizeof(repr) - i); + + return 0; +} + +_SB_HEAD(file, FILE *file) +{ + return sb_raw_file(_UNPACKED_ARGS, file); +} + +_SB_RAW_HEAD(file, FILE *file) +{ + long file_size; + + if (fseek(file, 0, SEEK_END)) + return -1; + + file_size = ftell(file); + if (file_size < 0) + return -1; + + if (extend_buf_raw(buf, buf_len, buf_filled, file_size)) + return -1; + + rewind(file); + + if (fread(*buf + *buf_filled, file_size, 1, file) != 1) + return -1; + + (*buf)[*buf_filled + file_size] = '\0'; + *buf_filled += file_size; + + return 0; +} + +_SB_HEAD(filepath, const char *path) +{ + return sb_raw_filepath(_UNPACKED_ARGS, path); +} + +_SB_RAW_HEAD(filepath, const char *path) +{ + FILE *file; + int retval; + + file = fopen(path, "r"); + if (!file) + return -1; + + retval = sb_raw_file(buf, buf_len, buf_filled, file); + + fclose(file); + return retval; +} + +_SB_HEAD(vsprintf, const char *fmt, va_list ap) +{ + return sb_raw_vsprintf(_UNPACKED_ARGS, fmt, ap); +} + +_SB_RAW_HEAD(vsprintf, const char *fmt, va_list ap) +{ + const unsigned char *in_pos = (const unsigned char*) fmt; + char c; + size_t i = 0; + bool percent = false; + + long num_arg; + int (*sb_cb)(char**, size_t*, size_t*, void*); + + while (in_pos[i]) { + c = in_pos[i++]; + + if (!percent) { + if (c == '%') { + percent = true; + if (sb_raw_bytes(buf, buf_len, buf_filled, + in_pos, i - 1)) + return -1; + } + + continue; + } + + percent = false; + 
in_pos += i; + i = 0; + + switch (c) { + case 'd': + case 'u': + num_arg = c == 'd' ? + va_arg(ap, int) : va_arg(ap, unsigned); + + if (sb_raw_long(buf, buf_len, buf_filled, num_arg)) + return -1; + break; + case 'f': + if (sb_raw_file(buf, buf_len, buf_filled, + va_arg(ap, FILE*))) + return -1; + break; + case 'p': + if (sb_raw_filepath(buf, buf_len, buf_filled, + va_arg(ap, const char*))) + return -1; + break; + case 's': + if (sb_raw_string(buf, buf_len, buf_filled, + va_arg(ap, const char*))) + return -1; + break; + case '_': + sb_cb = va_arg(ap, int (*)(char**, size_t*, + size_t*, void*)); + if (sb_cb(buf, buf_len, buf_filled, + va_arg(ap, void*))) + return -1; + break; + case '%': + in_pos--; + i++; + } + } + + if (!percent && sb_raw_bytes(buf, buf_len, buf_filled, in_pos, i)) + return -1; + + return 0; +} + +_SB_HEAD(sprintf, const char *fmt, ...) +{ + va_list ap; + int res; + + va_start(ap, fmt); + res = sb_raw_vsprintf(_UNPACKED_ARGS, fmt, ap); + va_end(ap); + + return res; +} + +_SB_RAW_HEAD(sprintf, const char *fmt, ...) +{ + va_list ap; + int res; + + va_start(ap, fmt); + res = sb_raw_vsprintf(buf, buf_len, buf_filled, fmt, ap); + va_end(ap); + + return res; +} + +int crop_buf(struct stringbuf *sb) +{ + return crop_buf_raw(_UNPACKED_ARGS); +} + +int crop_buf_raw(_RAW_BUF_ARGS) +{ + char *new_buf; + + if (*buf_len <= *buf_filled + 1) + return 0; + + new_buf = realloc(*buf, *buf_filled + 1); + if (!new_buf) + return -1; + + *buf = new_buf; + *buf_len = *buf_filled + 1; + return 0; +} diff --git a/src/string_buf.h b/src/string_buf.h new file mode 100644 index 0000000..160b7cb --- /dev/null +++ b/src/string_buf.h @@ -0,0 +1,59 @@ +/** + * C string buffers for easy construction of complex strings + * + * Copyright (C) 2021 Wojtek Kosior + * Redistribution terms are gathered in the `copyright' file. 
+ */ + +#ifndef STRING_BUF_H +#define STRING_BUF_H + +#include <stdio.h> +#include <stdarg.h> + +struct stringbuf { + char *buf; + size_t buf_len; + size_t buf_filled; +}; + +void stringbuf_init(struct stringbuf *sb); +void stringbuf_destroy(struct stringbuf *sb); +void stringbuf_truncate(struct stringbuf *sb, size_t len); + +#define _RAW_BUF_ARGS char **buf, size_t *buf_len, size_t *buf_filled + +#define _SB_HEAD(name, ...) \ + int sb_##name(struct stringbuf *sb, __VA_ARGS__) + +#define _SB_RAW_HEAD(name, ...) \ + int sb_raw_##name(_RAW_BUF_ARGS, __VA_ARGS__) + +#define _SB_DEFINE_2(name, ...) \ + _SB_HEAD(name, __VA_ARGS__); \ + _SB_RAW_HEAD(name, __VA_ARGS__) + +int extend_buf_raw(_RAW_BUF_ARGS, size_t extend_len); +int extend_buf(struct stringbuf *sb, size_t extend_len); + +int crop_buf_raw(_RAW_BUF_ARGS); +int crop_buf(struct stringbuf *sb); + +_SB_DEFINE_2(bytes, const void *bytes, size_t bytes_len); +_SB_DEFINE_2(string, const char *string); +_SB_DEFINE_2(char, char c); +_SB_DEFINE_2(long, long num); +_SB_DEFINE_2(file, FILE *file); +_SB_DEFINE_2(filepath, const char *path); +_SB_DEFINE_2(vsprintf, const char *fmt, va_list ap); +_SB_DEFINE_2(sprintf, const char *fmt, ...); + +#undef _SB_DEFINE_2 + +#ifndef STRING_BUF_C +#undef _RAW_BUF_ARGS +#undef _SB_HEAD +#undef _SB_RAW_HEAD +#endif + +#endif /* STRING_BUF_H */ |