From f0112604a6b9f4af393389bda50bb3f504e68cfa Mon Sep 17 00:00:00 2001 From: Michael Bunk Date: Mon, 9 Feb 2015 12:05:48 +0100 Subject: [PATCH 001/248] Remove misguided call to srand() A random number generator needs to be initialized once per process after a fork, but not after each request, more so with an argument that changes only once per second. This fixes SpiderLabs#778 This is a copy of my commit deec149ca363dd14213afd1f9d7f71a71959ef31. --- apache2/modsecurity.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/apache2/modsecurity.c b/apache2/modsecurity.c index da89faa7a2..5bda4cff82 100644 --- a/apache2/modsecurity.c +++ b/apache2/modsecurity.c @@ -237,8 +237,6 @@ static void modsecurity_persist_data(modsec_rec *msr) { } /* Remove stale collections. */ - srand(time(NULL)); - if (rand() < RAND_MAX/100) { arr = apr_table_elts(msr->collections); te = (apr_table_entry_t *)arr->elts; From fdcab3a60d4175f3a66792e84a4b2e0b3b7507de Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Fri, 16 Oct 2015 11:13:56 -0300 Subject: [PATCH 002/248] Adds information about the issue #836 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 031648e4d5..b67b413f64 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Remove misguided call to srand() + [Issues #778, #781 and #836 - Michael Bunk, @gilperon] * Fix compilation problem while ssdeep is installed in non-standard location. [Issude #872 - Kurt Newman] From 7ba07bd5476bd90d63079df9015ec7d9772a4f60 Mon Sep 17 00:00:00 2001 From: Eugene Alekseev Date: Mon, 14 Sep 2015 16:47:19 +0300 Subject: [PATCH 003/248] Fix buffer overflow on empty strings in key. Sometimes apache segfalult on memory copying when key.dptr is some kind of empty string and key.dsize seems to be 0. 
--- apache2/persist_dbm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/persist_dbm.c b/apache2/persist_dbm.c index b698e7913f..42123f8c18 100644 --- a/apache2/persist_dbm.c +++ b/apache2/persist_dbm.c @@ -626,7 +626,7 @@ int collections_remove_stale(modsec_rec *msr, const char *col_name) { */ rc = apr_sdbm_firstkey(dbm, &key); while(rc == APR_SUCCESS) { - char *s = apr_pstrmemdup(msr->mp, key.dptr, key.dsize - 1); + char *s = apr_pstrmemdup(msr->mp, key.dptr, strlen(key.dptr)); *(char **)apr_array_push(keys_arr) = s; rc = apr_sdbm_nextkey(dbm, &key); } From 198032208a2e6ca6f0520fcc021b787f0fef37b8 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 26 Oct 2015 13:49:05 -0300 Subject: [PATCH 004/248] Improves #927 by checking earlier if the string is empty or not --- apache2/persist_dbm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/apache2/persist_dbm.c b/apache2/persist_dbm.c index 42123f8c18..76c3820baf 100644 --- a/apache2/persist_dbm.c +++ b/apache2/persist_dbm.c @@ -626,8 +626,10 @@ int collections_remove_stale(modsec_rec *msr, const char *col_name) { */ rc = apr_sdbm_firstkey(dbm, &key); while(rc == APR_SUCCESS) { - char *s = apr_pstrmemdup(msr->mp, key.dptr, strlen(key.dptr)); - *(char **)apr_array_push(keys_arr) = s; + if (key.dsize) { + char *s = apr_pstrmemdup(msr->mp, key.dptr, key.dsize - 1); + *(char **)apr_array_push(keys_arr) = s; + } rc = apr_sdbm_nextkey(dbm, &key); } apr_sdbm_unlock(dbm); From 18d79b62211cb65987beb5c35abe929dca724ca1 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 26 Oct 2015 13:55:17 -0300 Subject: [PATCH 005/248] Adds information about the issue #927 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index b67b413f64..470a9fc6db 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Fix crash while adding empty keys to persistent 
collections. + [Issues #927 - Eugene Alekseev, Marc Stern and ModSecurity team] * Remove misguided call to srand() [Issues #778, #781 and #836 - Michael Bunk, @gilperon] * Fix compilation problem while ssdeep is installed in non-standard From 6d458be8ca8b528db23e09ef8a0247448c80fc7d Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 27 Oct 2015 14:03:45 -0300 Subject: [PATCH 006/248] Fix typo in CHANGES file --- CHANGES | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES b/CHANGES index 470a9fc6db..0564f80f45 100644 --- a/CHANGES +++ b/CHANGES @@ -2,12 +2,12 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ * Fix crash while adding empty keys to persistent collections. - [Issues #927 - Eugene Alekseev, Marc Stern and ModSecurity team] + [Issue #927 - Eugene Alekseev, Marc Stern and ModSecurity team] * Remove misguided call to srand() [Issues #778, #781 and #836 - Michael Bunk, @gilperon] * Fix compilation problem while ssdeep is installed in non-standard location. - [Issude #872 - Kurt Newman] + [Issue #872 - Kurt Newman] * Fix invalid storage reference by apr_psprintf at msc_crypt.c [Issue #609 - Jeff Trawick] From 3044ad012b7fe0b417374bd032dc183b50ab4fbc Mon Sep 17 00:00:00 2001 From: Andrew Elble Date: Thu, 30 Apr 2015 12:03:42 -0400 Subject: [PATCH 007/248] Fix the variable resolution duration (Issue #662) apr_time_usec is apparently defined as follows: Which leads DURATION to not behave as expected when duration exceeds one second. 
--- apache2/re_variables.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/re_variables.c b/apache2/re_variables.c index b83af1c605..c69085feef 100644 --- a/apache2/re_variables.c +++ b/apache2/re_variables.c @@ -1828,7 +1828,7 @@ static int var_duration_generate(modsec_rec *msr, msre_var *var, msre_rule *rule rvar = apr_pmemdup(mptmp, var, sizeof(msre_var)); rvar->value = apr_psprintf(mptmp, "%" APR_TIME_T_FMT, - (apr_time_usec(apr_time_now() - msr->r->request_time))); + (apr_time_now() - msr->r->request_time)); rvar->value_len = strlen(rvar->value); apr_table_addn(vartab, rvar->name, (void *)rvar); From 2e7ae24b16ec84723ee556d238047e7dd671cf30 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 27 Oct 2015 14:39:20 -0300 Subject: [PATCH 008/248] Adds information about the issue #662 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 0564f80f45..03e4a283af 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Fix variable resolution duration (Content of the DURATION variable). + [Issue #662 - Andrew Elble] * Fix crash while adding empty keys to persistent collections. [Issue #927 - Eugene Alekseev, Marc Stern and ModSecurity team] * Remove misguided call to srand() From 9dbb31b6ce1a96c9da50bb1e9c8b56b9c5c54d43 Mon Sep 17 00:00:00 2001 From: Kurt Newman Date: Tue, 5 May 2015 16:04:23 -0500 Subject: [PATCH 009/248] Fix WITH_APU_CRYPTO check on 32-bit Linux platform All of the apr flags needed to compile APU_HAVE_CRYPTO check in configure.ac aren't passed. While this works fine for 64-bit machines (because _LARGEFILE64_SOURCE is already defined), this does not work on 32-bit. This in-turn breaks the apr_off_t definition in apr.h. By passing along the apr --cflags and --cppflags to compile, 32-bit machines will allow WITH_APU_CRYPTO to be set if there's support for it. 
--- CHANGES | 2 ++ configure.ac | 11 ++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 03e4a283af..becf7acb14 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Fix apr_crypto.h check on 32-bit Linux platform + [Issue #882 - Kurt Newman] * Fix variable resolution duration (Content of the DURATION variable). [Issue #662 - Andrew Elble] * Fix crash while adding empty keys to persistent collections. diff --git a/configure.ac b/configure.ac index f00172a771..7517885893 100644 --- a/configure.ac +++ b/configure.ac @@ -712,7 +712,12 @@ CHECK_YAJL() CHECK_SSDEEP() #AC_SEARCH_LIBS([fuzzy_hash_buf], [fuzzy]) -CFLAGS="$CFLAGS $APU_CFLAGS" +# Temporarily set cflags for apr_crypto check, then restore +# since it's already used correctly to compile modsecurity module. +ORIG_CFLAGS="$CFLAGS $APU_CFLAGS" +ORIG_CPPFLAGS="$CPPFLAGS" +CFLAGS="$CFLAGS $APR_CFLAGS" +CPPFLAGS="$CPPFLAGS $APR_CPPFLAGS" AC_TRY_COMPILE( [#include ], [ @@ -725,6 +730,10 @@ AC_TRY_COMPILE( ], [ AC_MSG_WARN([APR util was not compiled with crypto support. SecRemoteRule will not support the parameter 'crypto']) ] ) +# Restore env vars so that we don't clutter with duplicates that +# are eventually appended later on +CFLAGS="$ORIG_CFLAGS" +CPPFLAGS="$ORIG_CPPFLAGS" # Current our unique download backend is curl, furhter we can support more. if test ! 
-z "${CURL_VERSION}"; then From fecefbe8b4fc628dfef9ebff0317bfb9fd871078 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Fri, 30 Oct 2015 14:03:19 -0300 Subject: [PATCH 010/248] Adds information about the issue #883 on the CHANGES file --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index becf7acb14..7b042994c2 100644 --- a/CHANGES +++ b/CHANGES @@ -2,7 +2,7 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ * Fix apr_crypto.h check on 32-bit Linux platform - [Issue #882 - Kurt Newman] + [Issue #882, #883 - Kurt Newman] * Fix variable resolution duration (Content of the DURATION variable). [Issue #662 - Andrew Elble] * Fix crash while adding empty keys to persistent collections. From 76dfc1a90b2b191426171a0953f7424331106880 Mon Sep 17 00:00:00 2001 From: vfolin Date: Mon, 16 Feb 2015 12:52:52 +0100 Subject: [PATCH 011/248] Fix apache logging limitation by using correct apache call. Apache 2.4 brought the option to change the ErrorLogFormat. However, many fields remain empty, as ModSecurity uses the wrong apache logging function. This fixes this behaviour with the use of ap_log_rerror. --- apache2/apache2_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/apache2_util.c b/apache2/apache2_util.c index 0960dc8e63..ed5b0ba216 100644 --- a/apache2/apache2_util.c +++ b/apache2/apache2_util.c @@ -268,7 +268,7 @@ static void internal_log_ex(request_rec *r, directory_config *dcfg, modsec_rec * else hostname = ""; #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 - ap_log_error(APLOG_MARK, APLOG_ERR | APLOG_NOERRNO, 0, r->server, + ap_log_rerror(APLOG_MARK, APLOG_ERR | APLOG_NOERRNO, 0, r, "[client %s] ModSecurity: %s%s [uri \"%s\"]%s", r->useragent_ip ? 
r->useragent_ip : r->connection->client_ip, str1, hostname, log_escape(msr->mp, r->uri), unique_id); #else From 8f3bc3cbac6222c6fec6d905c6023999fe0a5a92 Mon Sep 17 00:00:00 2001 From: Christian Folini Date: Mon, 30 Nov 2015 14:40:06 +0100 Subject: [PATCH 012/248] Introduced ap_log_rerror declaration to standalone/server.c --- standalone/server.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/standalone/server.c b/standalone/server.c index 53beccf9bf..05b3bf3ac4 100644 --- a/standalone/server.c +++ b/standalone/server.c @@ -285,6 +285,31 @@ AP_DECLARE(void) ap_log_error_(const char *file, int line, int module_index, modsecLogHook(modsecLogObj, level, errstr); } +#if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER < 3 +AP_DECLARE(void) ap_log_rerror(const char *file, int line, int level, + apr_status_t status, const request_rec *r, + const char *fmt, ...) +// __attribute__((format(printf,6,7))) +#else +AP_DECLARE(void) ap_log_rerror_(const char *file, int line, int module_index, + int level, apr_status_t status, + const request_rec *r, const char *fmt, ...) 
+// __attribute__((format(printf,7,8))) +#endif +{ + va_list args; + char errstr[MAX_STRING_LEN]; + + va_start(args, fmt); + + apr_vsnprintf(errstr, MAX_STRING_LEN, fmt, args); + + va_end(args); + + if(modsecLogHook != NULL) + modsecLogHook(modsecLogObj, level, errstr); +} + #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER < 3 AP_DECLARE(void) ap_log_perror(const char *file, int line, int level, apr_status_t status, apr_pool_t *p, From 8defe8ac3d720dcfbb23b2f0fae41ede872a14f3 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 22 Oct 2015 18:09:32 -0300 Subject: [PATCH 013/248] Adds information about the pull request #840 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 7b042994c2..6690f6cfa6 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Fix apache logging limitation by using correct apache call. + [Issue #840 - Christian Folini] * Fix apr_crypto.h check on 32-bit Linux platform [Issue #882, #883 - Kurt Newman] * Fix variable resolution duration (Content of the DURATION variable). From 51f1ff6ebf20c5f40d39baddb574993cefa13a46 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 28 Sep 2015 13:03:32 -0300 Subject: [PATCH 014/248] iis-installer: Adds IIS 10 on the suported list As reported on issue #931 the installer was marking ModSecurity as incompatible with IIS 10. --- CHANGES | 2 ++ iis/installer.wxs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 6690f6cfa6..aa3f1a041a 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Understands IIS 10 as compatible on Windows installer. + [Issue #931 - Anton Serbulov, Pavel Vasilevich and ModSecurity team] * Fix apache logging limitation by using correct apache call. 
[Issue #840 - Christian Folini] * Fix apr_crypto.h check on 32-bit Linux platform diff --git a/iis/installer.wxs b/iis/installer.wxs index 51f4878b14..49d2e5e42a 100644 --- a/iis/installer.wxs +++ b/iis/installer.wxs @@ -91,7 +91,7 @@ - + From b3ce3da2fc1c19e998ba850751d4deb2d8e23d8a Mon Sep 17 00:00:00 2001 From: Elia Pinto Date: Fri, 26 Sep 2014 06:52:45 -0700 Subject: [PATCH 015/248] mlogc-batch-load.pl.in: fix searching SecAuditLogStorageDir files with Apache 2.4 Setting SecAuditLogType concurrent the files created by modsecurity in the directory SecAuditLogStorageDir are of the type %Y%m%d-%H%M%S-UNIQUE_ID where UNIQUE_ID is produced by the module mod_unique_id. In apache 2.2 UNIQUE_ID was 24 characters long while in apache 2.4 is 27 characters long, as a result of the new version of mod_unique_id. This patch extends the regular expression for searching these log files for apache 2.4 and apache 2.2, and also with future releases, at least with regard to the length of UNIQUE_ID Signed-off-by: Elia Pinto --- mlogc/mlogc-batch-load.pl.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlogc/mlogc-batch-load.pl.in b/mlogc/mlogc-batch-load.pl.in index 53da8786a7..60030771db 100755 --- a/mlogc/mlogc-batch-load.pl.in +++ b/mlogc/mlogc-batch-load.pl.in @@ -38,7 +38,7 @@ find( (($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size) = stat($_)) && -f _ && #### MODSEC-204 /^\d{8}-\d+-\w{24}$/s - /^\d{8}-\d+-.{24}$/s + /^\d{8}-\d+-.{24,}$/s && (($fn = $File::Find::name) =~ s/^\Q$ROOTDIR\E//) && push(@AUDIT, [$fn, $size]); }, From c373256d467ef48f0a7d83f243ec53d492018e49 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 4 Jan 2016 15:02:31 -0300 Subject: [PATCH 016/248] Adds information about the pull request #775 on the CHANGES file --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index aa3f1a041a..de1448f8dd 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD mmm YYYY - X.Y.Z (To be released) 
------------------------------------ + * mlogc-batch-load.pl.in: fix searching SecAuditLogStorageDir + files with Apache 2.4 + [Issue #775 - Elia Pinto] * Understands IIS 10 as compatible on Windows installer. [Issue #931 - Anton Serbulov, Pavel Vasilevich and ModSecurity team] * Fix apache logging limitation by using correct apache call. From 3a7fdf8fc037f6ebfe5fd20d70432a9af1a38250 Mon Sep 17 00:00:00 2001 From: Wesley M Date: Tue, 8 Dec 2015 23:26:26 -0300 Subject: [PATCH 017/248] Refactoring conditional directives for if wrappers, alternative if statements and incomplete if conditions. --- apache2/apache2_config.c | 25 ++++++++++++++----------- apache2/mod_security2.c | 13 +++++++------ standalone/config.c | 11 ++++++----- 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index bfbcb83468..bad5c5dc79 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -755,6 +755,9 @@ static const char *add_rule(cmd_parms *cmd, directory_config *dcfg, int type, char *rid = NULL; msre_rule *rule = NULL; extern msc_engine *modsecurity; + int type_with_lua = 1; + int type_rule; + int rule_actionset; int offset = 0; #ifdef DEBUG_CONF @@ -787,25 +790,25 @@ static const char *add_rule(cmd_parms *cmd, directory_config *dcfg, int type, } /* Rules must have uniq ID */ - if ( + type_rule = (dcfg->tmp_chain_starter == NULL); #if defined(WITH_LUA) - type != RULE_TYPE_LUA && + type_rule = (type != RULE_TYPE_LUA && type_rule); #endif - (dcfg->tmp_chain_starter == NULL)) + if (type_rule) if(rule->actionset == NULL) return "ModSecurity: Rules must have at least id action"; if(rule->actionset != NULL && (dcfg->tmp_chain_starter == NULL)) { - if(rule->actionset->id == NOT_SET_P + rule_actionset = (rule->actionset->id == NOT_SET_P); #if defined(WITH_LUA) - && (type != RULE_TYPE_LUA) + rule_actionset = (rule_actionset && (type != RULE_TYPE_LUA)); #endif - ) - return "ModSecurity: No action id present within the 
rule"; + if (rule_actionset) + return "ModSecurity: No action id present within the rule"; #if defined(WITH_LUA) - if(type != RULE_TYPE_LUA) + type_with_lua = (type != RULE_TYPE_LUA); #endif - { + if (type_with_lua){ rid = apr_hash_get(dcfg->rule_id_htab, rule->actionset->id, APR_HASH_KEY_STRING); if(rid != NULL) { return "ModSecurity: Found another rule with the same id"; @@ -1666,7 +1669,7 @@ static const char *cmd_rule_perf_time(cmd_parms *cmd, void *_dcfg, } char *parser_conn_limits_operator(apr_pool_t *mp, const char *p2, - TreeRoot **whitelist, TreeRoot **suspicious_list, + TreeRoot **whitelist, TreeRoot **suspicious_list, const char *filename) { int res = 0; @@ -1753,7 +1756,7 @@ static const char *cmd_conn_read_state_limit(cmd_parms *cmd, void *_dcfg, if (param) return param; } - + conn_read_state_limit = limit; return NULL; diff --git a/apache2/mod_security2.c b/apache2/mod_security2.c index 519f2cc8db..5404fd8744 100644 --- a/apache2/mod_security2.c +++ b/apache2/mod_security2.c @@ -1086,6 +1086,7 @@ static void hook_error_log(const char *file, int line, int level, apr_status_t s { modsec_rec *msr = NULL; error_message_t *em = NULL; + int msr_ap_server; #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 if (info == NULL) return; @@ -1102,15 +1103,15 @@ static void hook_error_log(const char *file, int line, int level, apr_status_t s /* Create a context for requests we never had the chance to process */ #if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 - if ((msr == NULL) + msr_ap_server = ((msr == NULL) && ((info->level & APLOG_LEVELMASK) < APLOG_DEBUG) - && apr_table_get(info->r->subprocess_env, "UNIQUE_ID")) + && apr_table_get(info->r->subprocess_env, "UNIQUE_ID")); #else - if ((msr == NULL) + msr_ap_server = ((msr == NULL) && ((level & APLOG_LEVELMASK) < APLOG_DEBUG) - && apr_table_get(r->subprocess_env, "UNIQUE_ID")) + && apr_table_get(r->subprocess_env, "UNIQUE_ID")); #endif - { + if (msr_ap_server) { 
#if AP_SERVER_MAJORVERSION_NUMBER > 1 && AP_SERVER_MINORVERSION_NUMBER > 2 msr = create_tx_context((request_rec *)info->r); #else @@ -1484,7 +1485,7 @@ static int hook_connection_early(conn_rec *conn) conn_read_state_suspicious_list, client_ip, NULL, &error_msg) <= 0)) { if (conn_limits_filter_state == MODSEC_DETECTION_ONLY) - ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, + ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, "ModSecurity: Too many threads [%ld] of %ld allowed " \ "in READ state from %s - There is a suspission list " \ "but that IP is not part of it, access granted", diff --git a/standalone/config.c b/standalone/config.c index 1552c6fabf..800d5b4344 100644 --- a/standalone/config.c +++ b/standalone/config.c @@ -432,6 +432,7 @@ AP_DECLARE(apr_status_t) ap_pcfg_openfile(ap_configfile_t **ret_cfg, apr_file_t *file = NULL; apr_finfo_t finfo; apr_status_t status; + int exist_type; #ifdef DEBUG char buf[120]; #endif @@ -457,13 +458,13 @@ AP_DECLARE(apr_status_t) ap_pcfg_openfile(ap_configfile_t **ret_cfg, if (status != APR_SUCCESS) return status; - if (finfo.filetype != APR_REG && + exist_type = (finfo.filetype != APR_REG); #if defined(WIN32) || defined(OS2) || defined(NETWARE) - strcasecmp(apr_filepath_name_get(name), "nul") != 0) { + exist_type = (exist_type && strcasecmp(apr_filepath_name_get(name), "nul") != 0); #else - strcmp(name, "/dev/null") != 0) { + exist_type = (exist_type && strcmp(name, "/dev/null") != 0); #endif /* WIN32 || OS2 */ - ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, + if (exist_type){ ap_log_error(APLOG_MARK, APLOG_ERR, 0, NULL, "Access to file %s denied by server: not a regular file", name); apr_file_close(file); @@ -503,7 +504,7 @@ AP_DECLARE(apr_status_t) ap_pcfg_openfile(ap_configfile_t **ret_cfg, #else new_cfg->getch = cfg_getch; new_cfg->getstr = cfg_getstr; - new_cfg->close = cfg_close; + new_cfg->close = cfg_close; #endif new_cfg->line_number = 0; *ret_cfg = new_cfg; From 59851fff2b98abece45b63aee10d0680e13486f4 Mon Sep 17 
00:00:00 2001 From: Felipe Zimmerle Date: Wed, 9 Dec 2015 10:06:00 -0300 Subject: [PATCH 018/248] Adds information about the issue #996 on the CHANGES file --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index de1448f8dd..21d6fc5c57 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Refactoring conditional directives for if wrappers, alternative if + statements and incomplete if conditions. + [Issue #996 - Wesley M and ModSecurity team] * mlogc-batch-load.pl.in: fix searching SecAuditLogStorageDir files with Apache 2.4 [Issue #775 - Elia Pinto] From 258e5545a2ba4a064004ccfedba015c1904cbdaa Mon Sep 17 00:00:00 2001 From: "Mario D. Santana" Date: Tue, 15 Dec 2015 16:03:45 -0700 Subject: [PATCH 019/248] Perform the intercept_action as well as the disruptive actions. --- apache2/re.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/apache2/re.c b/apache2/re.c index 1d843e21c1..7e0a238c63 100644 --- a/apache2/re.c +++ b/apache2/re.c @@ -2513,6 +2513,11 @@ static void msre_perform_disruptive_actions(modsec_rec *msr, msre_rule *rule, } } } + if (actionset->intercept_action_rec->metadata->type == ACTION_DISRUPTIVE) { + if (actionset->intercept_action_rec->metadata->execute != NULL) { + actionset->intercept_action_rec->metadata->execute(msr, mptmp, rule, actionset->intercept_action_rec); + } + } /* If "noauditlog" was used do not mark the transaction relevant. */ if (actionset->auditlog != 0) { From e3b3721ee379dd8251cec97f5e588456cf5ee420 Mon Sep 17 00:00:00 2001 From: "Mario D. Santana" Date: Tue, 15 Dec 2015 16:04:20 -0700 Subject: [PATCH 020/248] Allow mod_proxy's "nocanon" behavior to be specified in proxy actions. 
--- apache2/re_actions.c | 7 ++++- .../regression/action/00-disruptive-actions.t | 31 +++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/apache2/re_actions.c b/apache2/re_actions.c index e06ebe65c2..aaf33817e2 100644 --- a/apache2/re_actions.c +++ b/apache2/re_actions.c @@ -664,7 +664,12 @@ static apr_status_t msre_action_proxy_execute(modsec_rec *msr, apr_pool_t *mptmp var = apr_pcalloc(mptmp, sizeof(msc_string)); if (var == NULL) return -1; - var->value = (char *)action->param; + if (!strncmp(action->param,"[nocanon]",9)) { + apr_table_setn(msr->r->notes,"proxy-nocanon",1); + var->value = (char *)action->param+9; + } else { + var->value = (char *)action->param; + } var->value_len = strlen(var->value); expand_macros(msr, var, rule, mptmp); diff --git a/tests/regression/action/00-disruptive-actions.t b/tests/regression/action/00-disruptive-actions.t index 15c9836d88..f682396ee9 100644 --- a/tests/regression/action/00-disruptive-actions.t +++ b/tests/regression/action/00-disruptive-actions.t @@ -474,6 +474,37 @@ GET => "http://$ENV{SERVER_NAME}:$ENV{SERVER_PORT}/test2.txt", ), }, +{ + type => "action", + comment => "nocanon proxy in phase:1 (get)", + conf => qq( + SecRuleEngine On + SecRequestBodyAccess On + SecResponseBodyAccess On + SecResponseBodyMimeType null + SecRule REQUEST_URI "\@streq /test2.txt" "phase:1,proxy:'[nocanon]http://$ENV{SERVER_NAME}:$ENV{SERVER_PORT}/test.txt',id:500005" + ), + match_log => { + error => { + apache => [qr/ModSecurity: Access denied using proxy to \(phase 1\)/, 1], + nginx => [qr/ModSecurity: Access denied with code 500 \(phase 1\) \(Configuration Error: Proxy action to .* requested but proxy is only available in Apache version\)./, 1], + }, + }, + match_response => { + status => { + apache => qr/^200$/, + nginx => qr/^500$/, + }, + content => { + apache => qr/^TEST$/, + nginx => qr/^*$/, + }, + }, + + request => new HTTP::Request( + GET => "http://$ENV{SERVER_NAME}:$ENV{SERVER_PORT}/test2.txt", + 
), +}, { type => "action", comment => "proxy in phase:2 (get)", From c711808ef73a16df830cebf3e381846510942ed3 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 6 Jan 2016 08:24:48 -0300 Subject: [PATCH 021/248] Cosmetic changes on #1031 to avoid compilation warning --- apache2/re_actions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/re_actions.c b/apache2/re_actions.c index aaf33817e2..504cc24053 100644 --- a/apache2/re_actions.c +++ b/apache2/re_actions.c @@ -665,7 +665,7 @@ static apr_status_t msre_action_proxy_execute(modsec_rec *msr, apr_pool_t *mptmp var = apr_pcalloc(mptmp, sizeof(msc_string)); if (var == NULL) return -1; if (!strncmp(action->param,"[nocanon]",9)) { - apr_table_setn(msr->r->notes,"proxy-nocanon",1); + apr_table_setn(msr->r->notes,"proxy-nocanon", "1"); var->value = (char *)action->param+9; } else { var->value = (char *)action->param; From 831282ee2c4107c4823063b25f28b4e1d648ffaf Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 6 Jan 2016 08:27:40 -0300 Subject: [PATCH 022/248] Adds information about the pull request #1031 on the CHANGES file --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 21d6fc5c57..675989b0d5 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Allow mod_proxy's "nocanon" behavior to be specified in proxy actions + and Perform the intercept_action as well as the disruptive actions. + [Issue #1031, #961, #763 - Mario D. Santana and ModSecurity team] * Refactoring conditional directives for if wrappers, alternative if statements and incomplete if conditions. [Issue #996 - Wesley M and ModSecurity team] From bd7ee39d2eb614bdec75cec5ca5569ca52ca3609 Mon Sep 17 00:00:00 2001 From: Ishwor Gurung Date: Sun, 3 May 2015 16:59:59 +1000 Subject: [PATCH 023/248] Allow user to choose between TLS versions(TLSProtocol option introduced). 
--- mlogc/mlogc.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/mlogc/mlogc.c b/mlogc/mlogc.c index e67da10850..e650452dc4 100644 --- a/mlogc/mlogc.c +++ b/mlogc/mlogc.c @@ -158,6 +158,8 @@ static apr_pool_t *recv_pool = NULL; static apr_array_header_t *queue = NULL; static const char *queue_path = NULL; static int ssl_validation = 0; +static int tlsprotocol = 1; +static curl_version_info_data* curlversion = NULL; /* static apr_time_t queue_time = 0; */ static void *requestline_regex = NULL; static int running = 0; @@ -810,6 +812,26 @@ static void init_configuration(void) startup_delay = atoi(s); } + /* TLS Protocol - TLSv1(0) TLSv1.1(1) TLSv1.2(2) (SSLv3 not supported) */ + s = apr_table_get(conf, "TLSProtocol"); + if (s != NULL) { + int num = atoi(s); + switch (num) { + case 0: + tlsprotocol = 0; + break; + case 1: + tlsprotocol = 1; + break; + case 2: + tlsprotocol = 2; + break; + default: + tlsprotocol = 2; /* Default is TLSv1.2 */ + } + } + curlversion = curl_version_info(CURLVERSION_NOW); + if ( startup_delay > 0 ) { error_log(LOG_NOTICE, NULL, "Delaying execution for %dms.", startup_delay); @@ -824,6 +846,8 @@ static void init_configuration(void) error_log(LOG_DEBUG2, NULL, "ErrorLog=%s", error_log_path); error_log(LOG_DEBUG2, NULL, "ErrorLogLevel=%d", error_log_level); error_log(LOG_DEBUG2, NULL, "StartupDelay=%d", startup_delay); + error_log(LOG_DEBUG2, NULL, "TLSProtocol=%d", tlsprotocol); + error_log(LOG_DEBUG2, NULL, "cURL version=%s", curlversion->version); s = apr_table_get(conf, "CheckpointInterval"); if (s != NULL) { @@ -1182,6 +1206,8 @@ static void logc_init(void) apr_status_t rc = 0; const char *errptr = NULL; int i, erroffset; + /* cURL major, minor and patch version */ + short cmaj, cmin, cpat = 0; queue = apr_array_make(pool, 64, sizeof(entry_t *)); if (queue == NULL) { @@ -1246,8 +1272,31 @@ static void logc_init(void) /* Seems like CURL_SSLVERSION_TLSv1_2 is not 
supported on libcurl * < v7.34.0 + * + * version_num is a 24 bit number created like this: + * <8 bits major number> | <8 bits minor number> | <8 bits patch number>. */ - curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1); + switch (tlsprotocol) { + case 0: + curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_0); + break; + case 1: + curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_1); + break; + case 2: + curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); + break; + default: + curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); + break; + } + cmaj = curlversion->version_num >> 16; + cmin = (curlversion->version_num & 0x00ff00) >> 8; + cpat = (curlversion->version_num & 0x0000ff); + /* If cURL version < v7.34.0, use TLS v1.x */ + if (cmaj <= 7 && cmin < 34) { + curl_easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1); + } curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 15); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, TRUE); @@ -1258,6 +1307,10 @@ static void logc_init(void) *(CURL **)apr_array_push(curl_handles) = curl; } + if (cmaj <= 7 && cmin < 34) { + error_log(LOG_DEBUG2, NULL, "TLSv1.2 is unsupported in cURL %d.%d.%d", cmaj, cmin, cpat); + } + logline_regex = pcre_compile(logline_pattern, PCRE_CASELESS, &errptr, &erroffset, NULL); if (logline_regex == NULL) { From cb91850bcd331a7a56a2a4f27706b3c9e396a059 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 6 Jan 2016 15:02:41 -0300 Subject: [PATCH 024/248] Adds information about the pull request #881 on the CHANGES file --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 675989b0d5..ff2e8c2779 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * mlogc: Allow user to choose between TLS versions (TLSProtocol option + introduced). 
+ [Issue #881 - Ishwor Gurung] * Allow mod_proxy's "nocanon" behavior to be specified in proxy actions and Perform the intercept_action as well as the disruptive actions. [Issue #1031, #961, #763 - Mario D. Santana and ModSecurity team] From 74558b42e416a8193ec2726f0cfa7ca5ec99ac63 Mon Sep 17 00:00:00 2001 From: Athmane Madjoudj Date: Fri, 13 Feb 2015 13:24:50 +0100 Subject: [PATCH 025/248] Fix build issue with Lua >= 5.3 --- apache2/msc_lua.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apache2/msc_lua.c b/apache2/msc_lua.c index f4482ae7e5..58206a7584 100644 --- a/apache2/msc_lua.c +++ b/apache2/msc_lua.c @@ -111,8 +111,11 @@ char *lua_compile(msc_script **script, const char *filename, apr_pool_t *pool) { dump.pool = pool; dump.parts = apr_array_make(pool, 128, sizeof(msc_script_part *)); +#if LUA_VERSION_NUM >= 503 + lua_dump(L, dump_writer, &dump, 1); +#else lua_dump(L, dump_writer, &dump); - +#endif (*script) = apr_pcalloc(pool, sizeof(msc_script)); (*script)->name = filename; (*script)->parts = dump.parts; From 05bcafd4fc6337fd0a06e0df4399d98aed00f4fc Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Fri, 8 Jan 2016 09:24:12 -0300 Subject: [PATCH 026/248] Extends Lua implementation to support Lua 5.3 --- CHANGES | 2 ++ apache2/msc_lua.c | 26 +++++++++++++++++++------- build/find_lua.m4 | 2 +- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/CHANGES b/CHANGES index ff2e8c2779..837a32feed 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Extanded Lua support to include version 5.3 + [Issue #837, #762, #814 - Athmane Madjoudj and ModSecurity team] * mlogc: Allow user to choose between TLS versions (TLSProtocol option introduced). 
[Issue #881 - Ishwor Gurung] diff --git a/apache2/msc_lua.c b/apache2/msc_lua.c index 58206a7584..51be1745b2 100644 --- a/apache2/msc_lua.c +++ b/apache2/msc_lua.c @@ -112,7 +112,7 @@ char *lua_compile(msc_script **script, const char *filename, apr_pool_t *pool) { dump.parts = apr_array_make(pool, 128, sizeof(msc_script_part *)); #if LUA_VERSION_NUM >= 503 - lua_dump(L, dump_writer, &dump, 1); + lua_dump(L, dump_writer, &dump, 0); #else lua_dump(L, dump_writer, &dump); #endif @@ -420,23 +420,32 @@ int lua_execute(msc_script *script, char *param, modsec_rec *msr, msre_rule *rul time_before = apr_time_now(); #ifdef CACHE_LUA + L = msr->L; rc = lua_gettop(L); if(rc) lua_pop(L, rc); + #else + /* Create new state. */ -#if LUA_VERSION_NUM > 501 +#if LUA_VERSION_NUM == 502 || LUA_VERSION_NUM == 503 || LUA_VERSION_NUM == 501 L = luaL_newstate(); -#else +#elif LUA_VERSION_NUM == 500 L = lua_open(); +#else +#error We are only tested under Lua 5.0, 5.1, 5.2, or 5.3. #endif luaL_openlibs(L); + #endif if(L == NULL) return -1; + luaL_newmetatable(L, "luaL_msc"); + lua_newtable(L); + /* Associate msr with the state. */ lua_pushlightuserdata(L, (void *)msr); lua_setglobal(L, "__msr"); @@ -448,13 +457,16 @@ int lua_execute(msc_script *script, char *param, modsec_rec *msr, msre_rule *rul } /* Register functions. */ -#if LUA_VERSION_NUM > 501 - luaL_setfuncs(L,mylib,0); - lua_setglobal(L,"m"); -#else +#if LUA_VERSION_NUM == 500 || LUA_VERSION_NUM == 501 luaL_register(L, "m", mylib); +#elif LUA_VERSION_NUM == 502 || LUA_VERSION_NUM == 503 + luaL_setfuncs(L, mylib, 0); +#else +#error We are only tested under Lua 5.0, 5.1, 5.2, or 5.3. 
#endif + lua_setglobal(L, "m"); + rc = lua_restore(L, script); if (rc) { *error_msg = apr_psprintf(msr->mp, "Lua: Failed to restore script with %i.", rc); diff --git a/build/find_lua.m4 b/build/find_lua.m4 index b86280112b..acb903e5fa 100644 --- a/build/find_lua.m4 +++ b/build/find_lua.m4 @@ -16,7 +16,7 @@ LUA_CPPFLAGS="" LUA_LDADD="" LUA_LDFLAGS="" LUA_CONFIG=${PKG_CONFIG} -LUA_PKGNAMES="lua5.1 lua-5.1 lua_5.1 lua-51 lua_51 lua51 lua5 lua" +LUA_PKGNAMES="lua5.1 lua-5.1 lua_5.1 lua-51 lua_51 lua51 lua5 lua lua5.2 lua-5.2 lua_5.2 lua-52 lua_52 lua52 lua5.3 lua-5.3 lua_5.3 lua-53 lua_53 lua53 " LUA_SONAMES="so la sl dll dylib a" AC_ARG_WITH( From 880b2764a31c2c3db77b81d139a0aa3149ce50db Mon Sep 17 00:00:00 2001 From: Chaim Sanders Date: Mon, 11 Jan 2016 10:09:41 -0500 Subject: [PATCH 027/248] Updated Licensing information to reflect year --- LICENSE | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/LICENSE b/LICENSE index 261eeb9e9f..9135230d95 100644 --- a/LICENSE +++ b/LICENSE @@ -175,18 +175,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2016 ModSecurity Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From 1068da464c4fd7a91e7cf25adb8e028cfd650d53 Mon Sep 17 00:00:00 2001 From: Chaim Sanders Date: Mon, 11 Jan 2016 10:43:05 -0500 Subject: [PATCH 028/248] Updated recommended conf to enter XML processor when content-type is application/xml --- modsecurity.conf-recommended | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modsecurity.conf-recommended b/modsecurity.conf-recommended index 9ee17f2f82..42a6f6c889 100644 --- a/modsecurity.conf-recommended +++ b/modsecurity.conf-recommended @@ -19,7 +19,7 @@ SecRequestBodyAccess On # Enable XML request body parser. # Initiate XML Processor in case of xml content-type # -SecRule REQUEST_HEADERS:Content-Type "text/xml" \ +SecRule REQUEST_HEADERS:Content-Type "(?:text|application)/xml" \ "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML" # Enable JSON request body parser. From 3f9e2ccc7cce33e8410c761f935cd5939ca4af63 Mon Sep 17 00:00:00 2001 From: Justin Gerace Date: Thu, 24 Apr 2014 16:06:00 -0700 Subject: [PATCH 029/248] Stop buffering when the request is larger than SecRequestBodyLimit and in ProcessPartial mode --- apache2/apache2_io.c | 27 ++++++++++++++++++--------- apache2/modsecurity.h | 1 + 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/apache2/apache2_io.c b/apache2/apache2_io.c index 88f1903183..af6555fde7 100644 --- a/apache2/apache2_io.c +++ b/apache2/apache2_io.c @@ -139,12 +139,14 @@ apr_status_t input_filter(ap_filter_t *f, apr_bucket_brigade *bb_out, if (rc == 0) { modsecurity_request_body_retrieve_end(msr); - bucket = apr_bucket_eos_create(f->r->connection->bucket_alloc); - if (bucket == NULL) return APR_EGENERAL; - APR_BRIGADE_INSERT_TAIL(bb_out, bucket); + if (msr->if_seen_eos) { + bucket = apr_bucket_eos_create(f->r->connection->bucket_alloc); + if (bucket == NULL) return APR_EGENERAL; + APR_BRIGADE_INSERT_TAIL(bb_out, bucket); - if (msr->txcfg->debuglog_level >= 4) { - msr_log(msr, 4, "Input filter: Sent EOS."); + if 
(msr->txcfg->debuglog_level >= 4) { + msr_log(msr, 4, "Input filter: Sent EOS."); + } } /* We're done */ @@ -164,7 +166,7 @@ apr_status_t input_filter(ap_filter_t *f, apr_bucket_brigade *bb_out, */ apr_status_t read_request_body(modsec_rec *msr, char **error_msg) { request_rec *r = msr->r; - unsigned int seen_eos; + unsigned int finished_reading; apr_bucket_brigade *bb_in; apr_bucket *bucket; @@ -193,7 +195,8 @@ apr_status_t read_request_body(modsec_rec *msr, char **error_msg) { return -1; } - seen_eos = 0; + finished_reading = 0; + msr->if_seen_eos = 0; bb_in = apr_brigade_create(msr->mp, r->connection->bucket_alloc); if (bb_in == NULL) return -1; do { @@ -283,6 +286,11 @@ apr_status_t read_request_body(modsec_rec *msr, char **error_msg) { if (buflen != 0) { int rcbs = modsecurity_request_body_store(msr, buf, buflen, error_msg); + + if (msr->reqbody_length > (apr_size_t)msr->txcfg->reqbody_limit && msr->txcfg->if_limit_action == REQUEST_BODY_LIMIT_ACTION_PARTIAL) { + finished_reading = 1; + } + if (rcbs < 0) { if (rcbs == -5) { if((msr->txcfg->is_enabled == MODSEC_ENABLED) && (msr->txcfg->if_limit_action == REQUEST_BODY_LIMIT_ACTION_REJECT)) { @@ -309,12 +317,13 @@ apr_status_t read_request_body(modsec_rec *msr, char **error_msg) { } if (APR_BUCKET_IS_EOS(bucket)) { - seen_eos = 1; + finished_reading = 1; + msr->if_seen_eos = 1; } } apr_brigade_cleanup(bb_in); - } while(!seen_eos); + } while(!finished_reading); // TODO: Why ignore the return code here? 
modsecurity_request_body_end(msr, error_msg); diff --git a/apache2/modsecurity.h b/apache2/modsecurity.h index bbec3faa4d..04b96a8c1c 100644 --- a/apache2/modsecurity.h +++ b/apache2/modsecurity.h @@ -268,6 +268,7 @@ struct modsec_rec { unsigned int phase_request_body_complete; apr_bucket_brigade *if_brigade; + unsigned int if_seen_eos; unsigned int if_status; unsigned int if_started_forwarding; From b3f197dd1f5afbea585067c3d8c61c9e0a1afe8c Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 25 Jan 2016 13:40:46 -0300 Subject: [PATCH 030/248] Adds information about the pull request #709 on the CHANGES file --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index 837a32feed..5c29f11022 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Stop buffering when the request is larger than SecRequestBodyLimit + in ProcessPartial mode + [Issue #709, #705, #728 - Justin Gerace and ModSecurity team] * Extanded Lua support to include version 5.3 [Issue #837, #762, #814 - Athmane Madjoudj and ModSecurity team] * mlogc: Allow user to choose between TLS versions (TLSProtocol option From d434a6c0430a1ff960e5a79720f08f12c8a34f8c Mon Sep 17 00:00:00 2001 From: Chaim Sanders Date: Thu, 7 Jan 2016 11:36:50 -0500 Subject: [PATCH 031/248] Fixing missing return value check for hashing response injection failure --- apache2/apache2_io.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/apache2/apache2_io.c b/apache2/apache2_io.c index af6555fde7..0d59613e4d 100644 --- a/apache2/apache2_io.c +++ b/apache2/apache2_io.c @@ -609,8 +609,12 @@ static int flatten_response_body(modsec_rec *msr) { retval = hash_response_body_links(msr); if(retval > 0) { retval = inject_hashed_response_body(msr, retval); - if (msr->txcfg->debuglog_level >= 4) { - msr_log(msr, 4, "Hash completed in %" APR_TIME_T_FMT " usec.", (apr_time_now() - time1)); + if(retval < 0){ + 
msr_log(msr, 1, "inject_hashed_response_body: Unable to inject hash into response body. Returning response without changes." ); + }else{ + if (msr->txcfg->debuglog_level >= 4) { + msr_log(msr, 4, "Hash completed in %" APR_TIME_T_FMT " usec.", (apr_time_now() - time1)); + } } } From 35fbc76ecca9457f59f15cae01a0da9ebfd3b0f7 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 25 Jan 2016 14:58:24 -0300 Subject: [PATCH 032/248] Adds information about the pull request #1041 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 5c29f11022..574343ec28 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Checking for hashing injection response to report in case of failure. + [Issue #1041 - ModSecurity team] * Stop buffering when the request is larger than SecRequestBodyLimit in ProcessPartial mode [Issue #709, #705, #728 - Justin Gerace and ModSecurity team] From b175c5cf60b022721990a3e6bd7ca253145805be Mon Sep 17 00:00:00 2001 From: littlecho Date: Thu, 26 Mar 2015 15:22:45 +0800 Subject: [PATCH 033/248] Update apache2_config.c Change third parameter(which is the apr file permission flag) from CREATEMODE to dcfg->auditlog_fileperms. Due to the user can specify the desired file permission setting for the audit log files with setting the value of SecAuditLogFileMode, we should follow the file permission setting from the config file. Therefore, as the dcfg->auditlog_fileperms will be modified in cmd_audit_log_dirmode function, we can use the value while calling apr_file_open to meet the file permission that specified in modsecurity.conf. 
--- apache2/apache2_config.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index bad5c5dc79..4cab8983f8 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -1192,10 +1192,13 @@ static const char *cmd_audit_log(cmd_parms *cmd, void *_dcfg, const char *p1) else { const char *file_name = ap_server_root_relative(cmd->pool, dcfg->auditlog_name); apr_status_t rc; - + + if (dcfg->auditlog_fileperms == NOT_SET) { + dcfg->auditlog_fileperms = CREATEMODE; + } rc = apr_file_open(&dcfg->auditlog_fd, file_name, APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY, - CREATEMODE, cmd->pool); + dcfg->auditlog_fileperms, cmd->pool); if (rc != APR_SUCCESS) { return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the audit log file: %s", From 0db247f0e9ca43dba7e4720ae48283ae3b26832e Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 26 Jan 2016 09:20:25 -0300 Subject: [PATCH 034/248] Replicates CREATEMODE patch to the secondary auditlog file At patch 45805be, @littlecho changed the behaviour to set the audit log index/serial file permission. Before, it was using the default permission now it is respecting the permission configured via SecAuditLogFileMode. This patch replicates @littlecho's work to the secundary auditlog file. 
--- apache2/apache2_config.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index 4cab8983f8..3e33fa0453 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -1234,9 +1234,12 @@ static const char *cmd_audit_log2(cmd_parms *cmd, void *_dcfg, const char *p1) const char *file_name = ap_server_root_relative(cmd->pool, dcfg->auditlog2_name); apr_status_t rc; + if (dcfg->auditlog_fileperms == NOT_SET) { + dcfg->auditlog_fileperms = CREATEMODE; + } rc = apr_file_open(&dcfg->auditlog2_fd, file_name, APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY, - CREATEMODE, cmd->pool); + dcfg->auditlog_fileperms, cmd->pool); if (rc != APR_SUCCESS) { return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the secondary audit log file: %s", From 4eb095ad2566f6055fcbf89be2cbab08d4b73a9e Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 26 Jan 2016 09:28:20 -0300 Subject: [PATCH 035/248] Adds information about the pull request #852 on the CHANGES file --- CHANGES | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES b/CHANGES index 574343ec28..fc30e44b3f 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,10 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Creating AuditLog serial file (or paralel index) respecting the + permission configured with SecAuditLogFileMode, before, it was used + only to save the transactions while in paralel mode. + [Issue #852 - @littlecho and ModSecurity team] * Checking for hashing injection response to report in case of failure. 
[Issue #1041 - ModSecurity team] * Stop buffering when the request is larger than SecRequestBodyLimit From 7b2ca1617e9bff088369cf2fd5807a09cb4ac6e8 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 15 Jul 2015 13:57:02 -0700 Subject: [PATCH 036/248] first pass at JSON logging implementation --- apache2/msc_logging.c | 475 ++++++++++++++++++++++++++++++++++++- apache2/msc_logging_json.h | 13 + configure.ac | 18 +- 3 files changed, 494 insertions(+), 12 deletions(-) create mode 100644 apache2/msc_logging_json.h diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 3323fac925..61e21ad773 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -25,6 +25,11 @@ #include "apr_version.h" #include +#ifdef WITH_JSON_LOGGING +#include +#include "msc_logging_json.h" +#endif + /** * Write the supplied data to the audit log (if the FD is ready), update * the size counters, update the hash context. @@ -381,6 +386,40 @@ static void sec_auditlog_write_producer_header(modsec_rec *msr) { sec_auditlog_write(msr, ".\n", 2); } +#ifdef WITH_JSON_LOGGING +/** + * Ouput the Producer header into a JSON generator + */ +static void sec_auditlog_write_producer_header_json(modsec_rec *msr, yajl_gen g) { + char **signatures = NULL; + int i; + + // this is written no matter what + yajl_string(g, "producer"); + + /* Try to write verything in one go. */ + if (msr->txcfg->component_signatures->nelts == 0) { + yajl_string(g, MODSEC_MODULE_NAME_FULL); + + return; + } + + // we'll need an array if there are component signatures + yajl_gen_array_open(g); + + /* Start with the ModSecurity signature. */ + yajl_string(g, MODSEC_MODULE_NAME_FULL); + + /* Then loop through the components and output individual signatures. 
*/ + signatures = (char **)msr->txcfg->component_signatures->elts; + for(i = 0; i < msr->txcfg->component_signatures->nelts; i++) { + yajl_string(g, (char *)signatures[i]); + } + + yajl_gen_array_close(g); // array for producers is finished +} +#endif + /* * \brief This function will returns the next chain node * @@ -480,6 +519,66 @@ static int chained_is_matched(modsec_rec *msr, const msre_rule *next_rule) { return 0; } +#ifdef WITH_JSON_LOGGING +/** + * Write detailed information about a rule and its actionset into a JSON generator + */ +static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) { + int present = 0; + + yajl_gen_map_open(g); + + yajl_string(g, "actionset"); + yajl_gen_map_open(g); + if (rule->actionset->id) { + yajl_kv_string(g, "id", rule->actionset->id); + } + if (rule->actionset->rev) { + yajl_kv_string(g, "rev", rule->actionset->rev); + } + if (rule->actionset->msg) { + yajl_kv_string(g, "msg", rule->actionset->msg); + } + if (rule->actionset->version) { + yajl_kv_string(g, "version", rule->actionset->version); + } + if (rule->actionset->logdata) { + yajl_kv_string(g, "logdata", rule->actionset->logdata); + } + if (rule->actionset->severity != NOT_SET) { + yajl_kv_int(g, "severity", rule->actionset->severity); + } + if (rule->actionset->accuracy != NOT_SET) { + yajl_kv_int(g, "accuracy", rule->actionset->accuracy); + } + if (rule->actionset->maturity != NOT_SET) { + yajl_kv_int(g, "maturity", rule->actionset->maturity); + } + if (rule->actionset->phase != NOT_SET) { + yajl_kv_int(g, "phase", rule->actionset->phase); + } + yajl_kv_bool(g, "is_chained", rule->actionset->is_chained); + yajl_gen_map_close(g); + + yajl_string(g, "operator"); + yajl_gen_map_open(g); + yajl_kv_string(g, "operator", rule->op_name); + yajl_kv_string(g, "operator_param", rule->op_param); + yajl_kv_string(g, "target", rule->p1); + yajl_gen_map_close(g); + + yajl_string(g, "config"); + yajl_gen_map_open(g); + yajl_kv_string(g, "filename", 
rule->filename); + yajl_kv_int(g, "line_num", rule->line_num); + yajl_gen_map_close(g); + + yajl_kv_bool(g, "is_matched", chained_is_matched(msr, rule)); + + yajl_gen_map_close(g); +} +#endif + /** * Produce an audit log entry. */ @@ -501,9 +600,14 @@ void sec_audit_logger(modsec_rec *msr) { char *buf = NULL, *pat = NULL; msc_parm *mparm = NULL; int arg_min, arg_max, sanitize_matched; +#ifdef WITH_JSON_LOGGING + yajl_gen g; +#endif +#ifndef WITH_JSON_LOGGING /* the boundary is used by both audit log types */ msr->new_auditlog_boundary = create_auditlog_boundary(msr->r); +#endif /* Return silently if we don't have a request line. This * means we will not be logging request timeouts. @@ -591,29 +695,72 @@ void sec_audit_logger(modsec_rec *msr) { } } - /* AUDITLOG_PART_HEADER */ +#ifdef WITH_JSON_LOGGING + /** + * allocate the buffer for the JSON generator + * passing null will force yajl to use malloc/realloc/free + * need to perf test using APR routines + */ + g = yajl_gen_alloc(NULL); + + /** + * don't pretty print JSON by default + * this is harder to eyeball but much easier to parse programmatically + */ + yajl_gen_config(g, yajl_gen_beautify, 0); + + yajl_gen_map_open(g); // IT BEGINS +#endif + + /* AUDITLOG_PART_HEADER */ +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_HEADER); sec_auditlog_write(msr, text, strlen(text)); - /* Format: time transaction_id remote_addr remote_port local_addr local_port */ text = apr_psprintf(msr->mp, "[%s] %s %s %u %s %u", current_logtime(msr->mp), msr->txid, msr->remote_addr, msr->remote_port, msr->local_addr, msr->local_port); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "transaction"); + yajl_gen_map_open(g); // transaction top-level key + yajl_kv_string(g, "time", current_logtime(msr->mp)); + yajl_kv_string(g, "transaction_id", msr->txid); + yajl_kv_string(g, "remote_address", msr->remote_addr); + yajl_kv_int(g, "remote_port", 
(int)msr->remote_port); // msr->remote_port is unsigned, yajl wants signed + yajl_kv_string(g, "local_address", msr->local_addr); + yajl_kv_int(g, "local_port", (int)msr->local_port); + + yajl_gen_map_close(g); // transaction top-level key is finished + + yajl_string(g, "request"); + yajl_gen_map_open(g); // request top-level key +#endif /* AUDITLOG_PART_REQUEST_HEADERS */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_REQUEST_HEADERS) != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_HEADERS); sec_auditlog_write(msr, text, strlen(text)); +#endif sanitize_request_line(msr); - +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, msr->request_line, strlen(msr->request_line)); sec_auditlog_write(msr, "\n", 1); +#else + // Request Line doesn't get its own map for now. should it? + yajl_kv_string(g, "request_line", msr->request_line); +#endif + +#ifdef WITH_JSON_LOGGING + yajl_string(g, "headers"); + yajl_gen_map_open(g); // separate map for request headers +#endif arr = apr_table_elts(msr->request_headers); te = (apr_table_entry_t *)arr->elts; @@ -624,7 +771,13 @@ void sec_audit_logger(modsec_rec *msr) { for (i = 0; i < arr->nelts; i++) { sanitized_partial = 0; sanitize_matched = 0; +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); +#else + // write the key no matter what + // since sanitization only occurs on the value + yajl_string(g, te[i].key); +#endif if (apr_table_get(msr->request_headers_to_sanitize, te[i].key) != NULL) { buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); @@ -656,13 +809,30 @@ void sec_audit_logger(modsec_rec *msr) { } if(sanitized_partial == 1 && sanitize_matched == 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); +#else + yajl_string(g, buf); +#endif } else { +#ifndef WITH_JSON_LOGGING memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); +#else + 
yajl_string(g, "****"); // fix this later +#endif } } +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + // we diverge from the original logic a bit because we always print the key + // at this no point sanitization had occured, so we just print the value + yajl_string(g, te[i].val); +#endif } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // request headers map is finished +#endif } /* AUDITLOG_PART_REQUEST_BODY */ @@ -749,9 +919,13 @@ void sec_audit_logger(modsec_rec *msr) { unsigned int chunk_offset = 0; unsigned int sanitize_offset = 0; unsigned int sanitize_length = 0; - +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_BODY); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "body"); + yajl_gen_array_open(g); // use an array here because we're writing in chunks +#endif for(;;) { rc = modsecurity_request_body_retrieve(msr, &chunk, -1, &my_error_msg); @@ -810,7 +984,11 @@ void sec_audit_logger(modsec_rec *msr) { /* Write the sanitized chunk to the log * and advance to the next chunk. 
*/ +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, chunk->data, chunk->length); +#else + yajl_string(g, chunk->data); +#endif chunk_offset += chunk->length; } @@ -819,6 +997,10 @@ void sec_audit_logger(modsec_rec *msr) { } } +#ifdef WITH_JSON_LOGGING + yajl_gen_array_close(g); // request body chunks array is finished +#endif + if (rc < 0) { msr_log(msr, 1, "Audit log: %s", my_error_msg); } @@ -838,29 +1020,58 @@ void sec_audit_logger(modsec_rec *msr) { if (buffer == NULL) { msr_log(msr, 1, "Audit log: Failed to reconstruct request body."); } else { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_FAKE_REQUEST_BODY); sec_auditlog_write(msr, text, strlen(text)); sec_auditlog_write(msr, buffer, strlen(buffer)); +#else + // this is a key instead 'request', doesn't need an array or map since it's one value + yajl_kv_string(g, "fake_body", buffer); +#endif } } } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // request top-level key is finished + + yajl_string(g, "response"); + yajl_gen_map_open(g); // response top-level key +#endif + /* AUDITLOG_PART_A_RESPONSE_HEADERS */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_A_RESPONSE_HEADERS) != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_A_RESPONSE_HEADERS); sec_auditlog_write(msr, text, strlen(text)); +#endif /* There are no response headers (or the status line) in HTTP 0.9 */ if (msr->response_headers_sent) { if (msr->status_line != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s %s\n", msr->response_protocol, msr->status_line); +#else + yajl_kv_string(g, "protocol", msr->response_protocol); + yajl_kv_string(g, "status", msr->status_line); +#endif } else { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s %u\n", msr->response_protocol, msr->response_status); +#else + yajl_kv_string(g, "protocol", msr->response_protocol); + yajl_kv_int(g, 
"status", (int)msr->response_status); +#endif } +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "headers"); + yajl_gen_map_open(g); // separate map for response headers +#endif /* Output headers */ @@ -873,7 +1084,13 @@ void sec_audit_logger(modsec_rec *msr) { for (i = 0; i < arr->nelts; i++) { sanitized_partial = 0; sanitize_matched = 0; +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); +#else + // write the key no matter what + // since sanitization only occurs on the value + yajl_string(g, te[i].key); +#endif if (apr_table_get(msr->response_headers_to_sanitize, te[i].key) != NULL) { buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); @@ -905,13 +1122,30 @@ void sec_audit_logger(modsec_rec *msr) { } if(sanitized_partial == 1 && sanitize_matched == 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); +#else + yajl_string(g, buf); +#endif } else { +#ifndef WITH_JSON_LOGGING memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); +#else + yajl_string(g, "****"); // fix this later +#endif } } +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + // we diverge from the original logic a bit because we always print the key + // at this point no sanitization had occured, so we just print the value + yajl_string(g, te[i].val); +#endif } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // response headers map is finised +#endif } } @@ -921,51 +1155,101 @@ void sec_audit_logger(modsec_rec *msr) { if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_RESPONSE_BODY) != NULL) { if (msr->resbody_data != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_RESPONSE_BODY); sec_auditlog_write(msr, text, strlen(text)); sec_auditlog_write(msr, msr->resbody_data, msr->resbody_length); +#else + yajl_kv_string(g, "body", msr->resbody_data); +#endif 
wrote_response_body = 1; } } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // response top-level key is finished + + yajl_string(g, "data"); + yajl_gen_map_open(g); // data top-level key +#endif + /* AUDITLOG_PART_TRAILER */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_TRAILER) != NULL) { apr_time_t now = apr_time_now(); - +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_TRAILER); sec_auditlog_write(msr, text, strlen(text)); +#endif /* Messages */ +#ifdef WITH_JSON_LOGGING + yajl_string(g, "messages"); + yajl_gen_array_open(g); +#endif for(i = 0; i < msr->alerts->nelts; i++) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Message: %s\n", ((char **)msr->alerts->elts)[i]); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, ((char **)msr->alerts->elts)[i]); +#endif } +#ifdef WITH_JSON_LOGGING + yajl_gen_array_close(g); +#endif /* Apache error messages */ +#ifdef WITH_JSON_LOGGING + yajl_string(g, "error_messages"); + yajl_gen_array_open(g); +#endif for(i = 0; i < msr->error_messages->nelts; i++) { error_message_t *em = (((error_message_t **)msr->error_messages->elts)[i]); +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Apache-Error: %s\n", format_error_log_message(msr->mp, em)); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, format_error_log_message(msr->mp, em)); +#endif } +#ifdef WITH_JSON_LOGGING + yajl_gen_array_close(g); +#endif /* Action */ if (msr->was_intercepted) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Action: Intercepted (phase %d)\n", msr->intercept_phase); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "action"); + yajl_gen_map_open(g); + yajl_kv_bool(g, "intercepted", 1); + yajl_kv_int(g, "phase", msr->intercept_phase); + yajl_gen_map_close(g); +#endif } /* Apache-Handler */ if (msr->r->handler != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Apache-Handler: 
%s\n", msr->r->handler); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, "handler", msr->r->handler); +#endif } /* Stopwatch; left in for compatibility reasons */ text = apr_psprintf(msr->mp, "Stopwatch: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT " (- - -)\n", msr->request_time, (now - msr->request_time)); +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, "stopwatch", text); +#endif /* Stopwatch2 */ { @@ -973,26 +1257,46 @@ void sec_audit_logger(modsec_rec *msr) { text = apr_psprintf(msr->mp, "Stopwatch2: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT "; %s\n", msr->request_time, (now - msr->request_time), perf_all); - +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, "stopwatch2", text); +#endif } /* Our response body does not contain chunks */ /* ENH Only write this when the output was chunked. */ /* ENH Add info when request body was decompressed, dechunked too. */ if (wrote_response_body) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Response-Body-Transformed: Dechunked\n"); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_bool(g, "response_body_dechunked", 1); +#endif } +#ifndef WITH_JSON_LOGGING sec_auditlog_write_producer_header(msr); +#else + sec_auditlog_write_producer_header_json(msr, g); +#endif /* Server */ if (msr->server_software != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Server: %s\n", msr->server_software); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, "server", msr->server_software); +#endif } +#ifdef WITH_JSON_LOGGING + yajl_string(g, "sanitized"); + yajl_gen_map_open(g); // open a separate map for sanitized values +#endif + /* Sanitised arguments */ { const apr_array_header_t *tarr; @@ -1002,16 +1306,31 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { +#ifndef WITH_JSON_LOGGING text = 
apr_psprintf(msr->mp, "Sanitised-Args: "); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "args"); + yajl_gen_array_open(g); +#endif } for(i = 0; i < tarr->nelts; i++) { msc_arg *arg = (msc_arg *)telts[i].val; +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), log_escape(msr->mp, arg->name), ((i == (tarr->nelts - 1)) ? ".\n" : "")); sec_auditlog_write(msr, text, strlen(text)); +#else + // yay arrays actually make it easier here + yajl_string(g, log_escape(msr->mp, arg->name)); +#endif } +#ifdef WITH_JSON_LOGGING + if (tarr->nelts > 0) { + yajl_gen_array_close(g); + } +#endif } /* Sanitised request headers */ @@ -1023,15 +1342,29 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Sanitised-Request-Headers: "); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "request_headers"); + yajl_gen_array_open(g); +#endif } for(i = 0; i < tarr->nelts; i++) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? ".\n" : "")); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, log_escape(msr->mp, telts[i].key)); +#endif + } +#ifdef WITH_JSON_LOGGING + if (tarr->nelts > 0) { + yajl_gen_array_close(g); } +#endif } /* Sanitised response headers */ @@ -1043,40 +1376,85 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Sanitised-Response-Headers: "); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "response_headers"); + yajl_gen_array_open(g); +#endif } for(i = 0; i < tarr->nelts; i++) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? 
"" : ", "), log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? ".\n" : "")); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, log_escape(msr->mp, telts[i].key)); +#endif + } +#ifdef WITH_JSON_LOGGING + if (tarr->nelts > 0) { + yajl_gen_array_close(g); } +#endif } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // sanitized args map is finished +#endif + /* Web application info. */ if ( ((msr->txcfg->webappid != NULL)&&(strcmp(msr->txcfg->webappid, "default") != 0)) || (msr->sessionid != NULL) || (msr->userid != NULL)) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "WebApp-Info: \"%s\" \"%s\" \"%s\"\n", msr->txcfg->webappid == NULL ? "-" : log_escape(msr->mp, msr->txcfg->webappid), msr->sessionid == NULL ? "-" : log_escape(msr->mp, msr->sessionid), msr->userid == NULL ? "-" : log_escape(msr->mp, msr->userid)); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "webapp_info"); + yajl_gen_map_open(g); + + if (msr->txcfg->webappid != NULL) { + yajl_kv_string(g, "id", log_escape(msr->mp, msr->txcfg->webappid)); + } + if (msr->sessionid != NULL) { + yajl_kv_string(g, "session", log_escape(msr->mp, msr->sessionid)); + } + if (msr->userid != NULL) { + yajl_kv_string(g, "user_id", log_escape(msr->mp, msr->userid)); + } + + yajl_gen_map_close(g); +#endif } if ( ((msr->txcfg->sensor_id != NULL)&&(strcmp(msr->txcfg->sensor_id, "default") != 0))) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Sensor-Id: \"%s\"\n", msr->txcfg->sensor_id == NULL ? "-" : log_escape(msr->mp, msr->txcfg->sensor_id)), sec_auditlog_write(msr, text, strlen(text)); +#else + if(msr->txcfg->sensor_id != NULL) { + yajl_kv_string(g, "sensor_id", log_escape(msr->mp, msr->txcfg->sensor_id)); + } +#endif } if (msr->txcfg->is_enabled > 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Engine-Mode: \"%s\"\n", msr->txcfg->is_enabled == 1 ? 
"DETECTION_ONLY" : "ENABLED"), sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, "engine_mode", (msr->txcfg->is_enabled == 1 ? "DETECTION_ONLY" : "ENABLED")); +#endif } /* Rule performance time */ @@ -1088,54 +1466,104 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Rules-Performance-Info: "); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "rules_performance_info"); + yajl_gen_map_open(g); // separate map for rule perf info +#endif } for(i = 0; i < tarr->nelts; i++) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\"%s=%s\"%s", ((i == 0) ? "" : ", "), log_escape(msr->mp, telts[i].key), log_escape(msr->mp, telts[i].val), ((i == (tarr->nelts - 1)) ? ".\n" : "")); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_string(g, log_escape(msr->mp, telts[i].key), log_escape(msr->mp, telts[i].val)); +#endif } +#ifdef WITH_JSON_LOGGING + if (tarr->nelts > 0) { + yajl_gen_map_close(g); // map for rule perf info is finished + } +#endif } - } +#ifdef WITH_JSON_LOGGING + yajl_gen_map_close(g); // data top-level key is finished +#endif + /* AUDITLOG_PART_UPLOADS */ if ((strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_UPLOADS) != NULL) && (msr->mpd != NULL)) { multipart_part **parts = NULL; unsigned int total_size = 0; int cfiles = 0; +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_UPLOADS); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "uploads"); + yajl_gen_map_open(g); +#endif parts = (multipart_part **)msr->mpd->parts->elts; +#ifdef WITH_JSON_LOGGING + yajl_string(g, "info"); + yajl_gen_array_open(g); // separate array for upload info +#endif for(cfiles = 0; cfiles < msr->mpd->parts->nelts; cfiles++) { if (parts[cfiles]->type == MULTIPART_FILE) { if(parts[cfiles]->filename != NULL) { +#ifndef 
WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%d,%u,\"%s\",\"%s\"\n", cfiles+1, parts[cfiles]->tmp_file_size, log_escape(msr->mp, parts[cfiles]->filename), log_escape(msr->mp, parts[cfiles]->content_type ? parts[cfiles]->content_type : "")); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_kv_int(g, "file_size", parts[cfiles]->tmp_file_size); + yajl_kv_string(g, "file_name", log_escape(msr->mp, parts[cfiles]->filename)); + yajl_kv_string(g, "content_type", parts[cfiles]->content_type ? parts[cfiles]->content_type : ""); +#endif total_size += parts[cfiles]->tmp_file_size; } } } +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "Total,%u\n", total_size); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_gen_array_close(g); // array for upload info is finished + yajl_kv_int(g, "total", total_size); + + yajl_gen_map_close(g); // uploads top-level key is finished +#endif } /* AUDITLOG_PART_MATCHEDRULES */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_MATCHEDRULES) != NULL) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_MATCHEDRULES); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_string(g, "matched_rules"); + yajl_gen_array_open(g); // matched_rules top-level key +#endif + /* Matched Rules */ + for(i = 0; i < msr->matched_rules->nelts; i++) { rule = ((msre_rule **)msr->matched_rules->elts)[i]; if ((rule != NULL) && (rule->actionset != NULL) && rule->actionset->is_chained && (rule->chain_starter == NULL)) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\n", rule->unparsed); sec_auditlog_write(msr, text, strlen(text)); +#else + write_rule_json(msr, rule, g); +#endif do { if (rule->ruleset != NULL) { @@ -1146,32 +1574,59 @@ void sec_audit_logger(modsec_rec *msr) { present = chained_is_matched(msr,next_rule); if (present == 0) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "#%s\n",next_rule->unparsed); +#endif } else { +#ifndef 
WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\n",next_rule->unparsed); +#endif i++; } - +#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); +#else + write_rule_json(msr, next_rule, g); +#endif } } rule = next_rule; } while (rule != NULL && rule->actionset != NULL && rule->actionset->is_chained); +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n"); sec_auditlog_write(msr, text, strlen(text)); +#endif } else { if ((rule != NULL) && (rule->actionset != NULL) && !rule->actionset->is_chained && (rule->chain_starter == NULL)) { +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s\n\n", rule->unparsed); sec_auditlog_write(msr, text, strlen(text)); +#else + write_rule_json(msr, rule, g); +#endif } } } - } +#ifdef WITH_JSON_LOGGING + yajl_gen_array_close(g); // matched_rules top-level key is finished +#endif + } /* AUDITLOG_PART_ENDMARKER */ - +#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_ENDMARKER); sec_auditlog_write(msr, text, strlen(text)); +#else + yajl_gen_map_close(g); // box it up! + + const unsigned char *final_buf; + size_t len; + yajl_gen_get_buf(g, &final_buf, &len); + sec_auditlog_write(msr, final_buf, len); + + yajl_gen_clear(g); + yajl_gen_free(g); +#endif /* Return here if we were writing to a serial log * as it does not need an index file. 
diff --git a/apache2/msc_logging_json.h b/apache2/msc_logging_json.h new file mode 100644 index 0000000000..e49b98c29d --- /dev/null +++ b/apache2/msc_logging_json.h @@ -0,0 +1,13 @@ +#include + +#include + +#define yajl_string(g, s) yajl_gen_string(g, (const unsigned char *)s, strlen(s)) + +#define yajl_kv_null(g, k) yajl_string(g, k); yajl_gen_null(g) + +#define yajl_kv_int(g, k, v) yajl_string(g, k); yajl_gen_integer(g, v) + +#define yajl_kv_string(g, k, v) yajl_string(g, k); yajl_string(g, v) + +#define yajl_kv_bool(g, k, v) yajl_string(g, k); yajl_gen_bool(g, v) diff --git a/configure.ac b/configure.ac index 7517885893..bb2e42e34a 100644 --- a/configure.ac +++ b/configure.ac @@ -275,6 +275,21 @@ if test "$build_docs" -eq 1; then TOPLEVEL_SUBDIRS="$TOPLEVEL_SUBDIRS docs" fi +# Add JSON audit logging +AC_ARG_ENABLE(json-logging, + AS_HELP_STRING([--enabled-json-logging], + [Enable JSON audit logging.]), +[ + if test "$enableval" != "no"; then + json_logging='-DWITH_JSON_LOGGING' + MODSEC_EXTRA_CFLAGS="$MODSEC_EXTRA_CFLAGS $json_logging" + else + json_logging='' + fi +], +[ + json_logging='' +]) # Add PCRE Studying @@ -659,8 +674,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" - +MODSEC_EXTRA_CFLAGS="$json_logging $pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" for f in $EXTRA_CFLAGS; do From dd79bea0b49df2e90683e47077f4d6e592f512ba Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Thu, 16 Jul 2015 15:33:52 -0700 Subject: [PATCH 037/248] Additional updates for JSON logging * Write Stopwatch2 values into a separate map * Remove legacy Stopwatch * Proper sanitization of request/response 
headers * Lazily open maps for keys that may not have content --- apache2/msc_logging.c | 120 +++++++++++++++++++++++++++++++----------- 1 file changed, 88 insertions(+), 32 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 61e21ad773..2fe63142ee 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -520,6 +520,26 @@ static int chained_is_matched(modsec_rec *msr, const msre_rule *next_rule) { } #ifdef WITH_JSON_LOGGING +/** + * Write detailed information about performance metrics into a JSON generator + */ +static void format_performance_variables_json(modsec_rec *msr, yajl_gen g) { + yajl_string(g, "stopwatch"); + yajl_gen_map_open(g); + + yajl_kv_int(g, "p1", msr->time_phase1); + yajl_kv_int(g, "p2", msr->time_phase2); + yajl_kv_int(g, "p3", msr->time_phase3); + yajl_kv_int(g, "p4", msr->time_phase4); + yajl_kv_int(g, "p5", msr->time_phase5); + yajl_kv_int(g, "sr", msr->time_storage_read); + yajl_kv_int(g, "sw", msr->time_storage_write); + yajl_kv_int(g, "l", msr->time_logging); + yajl_kv_int(g, "gc", msr->time_gc); + + yajl_gen_map_close(g); +} + /** * Write detailed information about a rule and its actionset into a JSON generator */ @@ -558,6 +578,9 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) yajl_kv_int(g, "phase", rule->actionset->phase); } yajl_kv_bool(g, "is_chained", rule->actionset->is_chained); + if (rule->actionset->is_chained && (rule->chain_starter == NULL)) { + yajl_kv_bool(g, "chain_starter", 1); + } yajl_gen_map_close(g); yajl_string(g, "operator"); @@ -602,6 +625,7 @@ void sec_audit_logger(modsec_rec *msr) { int arg_min, arg_max, sanitize_matched; #ifdef WITH_JSON_LOGGING yajl_gen g; + int been_opened = 0; // helper flag for conditionally opening maps #endif #ifndef WITH_JSON_LOGGING @@ -704,7 +728,7 @@ void sec_audit_logger(modsec_rec *msr) { g = yajl_gen_alloc(NULL); /** - * don't pretty print JSON by default + * don't pretty print JSON * this is harder to eyeball but 
much easier to parse programmatically */ yajl_gen_config(g, yajl_gen_beautify, 0); @@ -771,16 +795,14 @@ void sec_audit_logger(modsec_rec *msr) { for (i = 0; i < arr->nelts; i++) { sanitized_partial = 0; sanitize_matched = 0; -#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); -#else +#ifdef WITH_JSON_LOGGING // write the key no matter what // since sanitization only occurs on the value yajl_string(g, te[i].key); #endif if (apr_table_get(msr->request_headers_to_sanitize, te[i].key) != NULL) { buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); - for ( k = 0; k < tarr_pattern->nelts; k++) { if(strncmp(telts_pattern[k].key,te[i].key,strlen(te[i].key)) ==0 ) { mparm = (msc_parm *)telts_pattern[k].val; @@ -818,7 +840,8 @@ void sec_audit_logger(modsec_rec *msr) { #ifndef WITH_JSON_LOGGING memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); #else - yajl_string(g, "****"); // fix this later + memset(buf, '*', strlen(buf)); // strlen also includes the appended newline on the header + yajl_string(g, buf); #endif } } @@ -827,7 +850,9 @@ void sec_audit_logger(modsec_rec *msr) { #else // we diverge from the original logic a bit because we always print the key // at this no point sanitization had occured, so we just print the value - yajl_string(g, te[i].val); + else { + yajl_string(g, te[i].val); + } #endif } #ifdef WITH_JSON_LOGGING @@ -1025,7 +1050,6 @@ void sec_audit_logger(modsec_rec *msr) { sec_auditlog_write(msr, text, strlen(text)); sec_auditlog_write(msr, buffer, strlen(buffer)); #else - // this is a key instead 'request', doesn't need an array or map since it's one value yajl_kv_string(g, "fake_body", buffer); #endif } @@ -1055,7 +1079,8 @@ void sec_audit_logger(modsec_rec *msr) { msr->status_line); #else yajl_kv_string(g, "protocol", msr->response_protocol); - yajl_kv_string(g, "status", msr->status_line); + // as an integer, response status is easier to parse than status_line + yajl_kv_int(g, "status", 
(int)msr->response_status); #endif } else { #ifndef WITH_JSON_LOGGING @@ -1084,9 +1109,8 @@ void sec_audit_logger(modsec_rec *msr) { for (i = 0; i < arr->nelts; i++) { sanitized_partial = 0; sanitize_matched = 0; -#ifndef WITH_JSON_LOGGING text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); -#else +#ifdef WITH_JSON_LOGGING // write the key no matter what // since sanitization only occurs on the value yajl_string(g, te[i].key); @@ -1131,7 +1155,8 @@ void sec_audit_logger(modsec_rec *msr) { #ifndef WITH_JSON_LOGGING memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); #else - yajl_string(g, "****"); // fix this later + memset(buf, '*', strlen(buf)); + yajl_string(g, buf); #endif } } @@ -1140,7 +1165,9 @@ void sec_audit_logger(modsec_rec *msr) { #else // we diverge from the original logic a bit because we always print the key // at this point no sanitization had occured, so we just print the value - yajl_string(g, te[i].val); + else { + yajl_string(g, te[i].val); + } #endif } #ifdef WITH_JSON_LOGGING @@ -1169,8 +1196,8 @@ void sec_audit_logger(modsec_rec *msr) { #ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // response top-level key is finished - yajl_string(g, "data"); - yajl_gen_map_open(g); // data top-level key + yajl_string(g, "audit_data"); + yajl_gen_map_open(g); // audit_data top-level key #endif /* AUDITLOG_PART_TRAILER */ @@ -1184,8 +1211,12 @@ void sec_audit_logger(modsec_rec *msr) { /* Messages */ #ifdef WITH_JSON_LOGGING - yajl_string(g, "messages"); - yajl_gen_array_open(g); + been_opened = 0; + if (msr->alerts->nelts > 0) { + yajl_string(g, "messages"); + yajl_gen_array_open(g); + been_opened = 1; + } #endif for(i = 0; i < msr->alerts->nelts; i++) { #ifndef WITH_JSON_LOGGING @@ -1196,13 +1227,19 @@ void sec_audit_logger(modsec_rec *msr) { #endif } #ifdef WITH_JSON_LOGGING - yajl_gen_array_close(g); + if (been_opened == 1) { + yajl_gen_array_close(g); + } #endif /* Apache error messages */ #ifdef WITH_JSON_LOGGING - yajl_string(g, 
"error_messages"); - yajl_gen_array_open(g); + been_opened = 0; + if (msr->error_messages->nelts > 0) { + yajl_string(g, "error_messages"); + yajl_gen_array_open(g); + been_opened = 1; + } #endif for(i = 0; i < msr->error_messages->nelts; i++) { error_message_t *em = (((error_message_t **)msr->error_messages->elts)[i]); @@ -1215,7 +1252,9 @@ void sec_audit_logger(modsec_rec *msr) { #endif } #ifdef WITH_JSON_LOGGING - yajl_gen_array_close(g); + if (been_opened == 1) { + yajl_gen_array_close(g); + } #endif /* Action */ @@ -1228,6 +1267,7 @@ void sec_audit_logger(modsec_rec *msr) { yajl_gen_map_open(g); yajl_kv_bool(g, "intercepted", 1); yajl_kv_int(g, "phase", msr->intercept_phase); + yajl_kv_string(g, "message", msr->intercept_message); yajl_gen_map_close(g); #endif } @@ -1242,27 +1282,25 @@ void sec_audit_logger(modsec_rec *msr) { #endif } +#ifndef WITH_JSON_LOGGING /* Stopwatch; left in for compatibility reasons */ text = apr_psprintf(msr->mp, "Stopwatch: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT " (- - -)\n", msr->request_time, (now - msr->request_time)); -#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); -#else - yajl_kv_string(g, "stopwatch", text); #endif /* Stopwatch2 */ +#ifndef WITH_JSON_LOGGING { char *perf_all = format_all_performance_variables(msr, msr->mp); text = apr_psprintf(msr->mp, "Stopwatch2: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT "; %s\n", msr->request_time, (now - msr->request_time), perf_all); -#ifndef WITH_JSON_LOGGING sec_auditlog_write(msr, text, strlen(text)); + } #else - yajl_kv_string(g, "stopwatch2", text); + format_performance_variables_json(msr, g); #endif - } /* Our response body does not contain chunks */ /* ENH Only write this when the output was chunked. 
*/ @@ -1293,8 +1331,7 @@ void sec_audit_logger(modsec_rec *msr) { } #ifdef WITH_JSON_LOGGING - yajl_string(g, "sanitized"); - yajl_gen_map_open(g); // open a separate map for sanitized values + been_opened = 0; #endif /* Sanitised arguments */ @@ -1310,6 +1347,12 @@ void sec_audit_logger(modsec_rec *msr) { text = apr_psprintf(msr->mp, "Sanitised-Args: "); sec_auditlog_write(msr, text, strlen(text)); #else + if (been_opened == 0) { + yajl_string(g, "sanitized"); + yajl_gen_map_open(g); + been_opened = 1; + } + yajl_string(g, "args"); yajl_gen_array_open(g); #endif @@ -1346,6 +1389,12 @@ void sec_audit_logger(modsec_rec *msr) { text = apr_psprintf(msr->mp, "Sanitised-Request-Headers: "); sec_auditlog_write(msr, text, strlen(text)); #else + if (been_opened == 0) { + yajl_string(g, "sanitized"); + yajl_gen_map_open(g); + been_opened = 1; + } + yajl_string(g, "request_headers"); yajl_gen_array_open(g); #endif @@ -1380,6 +1429,12 @@ void sec_audit_logger(modsec_rec *msr) { text = apr_psprintf(msr->mp, "Sanitised-Response-Headers: "); sec_auditlog_write(msr, text, strlen(text)); #else + if (been_opened == 0) { + yajl_string(g, "sanitized"); + yajl_gen_map_open(g); + been_opened = 1; + } + yajl_string(g, "response_headers"); yajl_gen_array_open(g); #endif @@ -1402,7 +1457,9 @@ void sec_audit_logger(modsec_rec *msr) { } #ifdef WITH_JSON_LOGGING - yajl_gen_map_close(g); // sanitized args map is finished + if (been_opened == 1) { + yajl_gen_map_close(g); // sanitized args map is finished + } #endif /* Web application info. 
*/ @@ -1493,7 +1550,7 @@ void sec_audit_logger(modsec_rec *msr) { } #ifdef WITH_JSON_LOGGING - yajl_gen_map_close(g); // data top-level key is finished + yajl_gen_map_close(g); // audit_data top-level key is finished #endif /* AUDITLOG_PART_UPLOADS */ @@ -1552,7 +1609,6 @@ void sec_audit_logger(modsec_rec *msr) { yajl_gen_array_open(g); // matched_rules top-level key #endif - /* Matched Rules */ for(i = 0; i < msr->matched_rules->nelts; i++) { @@ -1608,7 +1664,7 @@ void sec_audit_logger(modsec_rec *msr) { } } #ifdef WITH_JSON_LOGGING - yajl_gen_array_close(g); // matched_rules top-level key is finished + yajl_gen_array_close(g); // matched_rules top-level key is finished #endif } From 7a39b4b5b9fb51054445e3a8ca66a5f5bdde75fb Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Sat, 18 Jul 2015 22:43:10 -0700 Subject: [PATCH 038/248] Make JSON audit logging a configurable option Remove compile-time setting for generating audit logs as JSON, creating a new config option (SecAuditLogFormat). sec_audit_logger is now a wrapper for sec_audit_logger_json or sec_audit_logger_native. This has the disadvantage of making the audit log generation code harder to maintain, but the logger function itself now is no longer peppered with binary branches. 
--- apache2/apache2_config.c | 27 + apache2/modsecurity.h | 3 + apache2/msc_logging.c | 1116 +++++++++++++++++++++++++++----------- apache2/msc_logging.h | 3 + configure.ac | 18 +- 5 files changed, 819 insertions(+), 348 deletions(-) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index 3e33fa0453..e38cf4e97a 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -73,6 +73,7 @@ void *create_directory_config(apr_pool_t *mp, char *path) /* audit log variables */ dcfg->auditlog_flag = NOT_SET; dcfg->auditlog_type = NOT_SET; + dcfg->auditlog_format = NOT_SET; dcfg->max_rule_time = NOT_SET; dcfg->auditlog_dirperms = NOT_SET; dcfg->auditlog_fileperms = NOT_SET; @@ -503,6 +504,8 @@ void *merge_directory_configs(apr_pool_t *mp, void *_parent, void *_child) merged->auditlog2_fd = parent->auditlog2_fd; merged->auditlog2_name = parent->auditlog2_name; } + merged->auditlog_format = (child->auditlog_format == NOT_SET + ? parent->auditlog_format : child->auditlog_format); merged->auditlog_storage_dir = (child->auditlog_storage_dir == NOT_SET_P ? 
parent->auditlog_storage_dir : child->auditlog_storage_dir); merged->auditlog_parts = (child->auditlog_parts == NOT_SET_P @@ -667,6 +670,7 @@ void init_directory_config(directory_config *dcfg) /* audit log variables */ if (dcfg->auditlog_flag == NOT_SET) dcfg->auditlog_flag = 0; if (dcfg->auditlog_type == NOT_SET) dcfg->auditlog_type = AUDITLOG_SERIAL; + if (dcfg->auditlog_format == NOT_SET) dcfg->auditlog_format = AUDITLOGFORMAT_NATIVE; if (dcfg->max_rule_time == NOT_SET) dcfg->max_rule_time = 0; if (dcfg->auditlog_dirperms == NOT_SET) dcfg->auditlog_dirperms = CREATEMODE_DIR; if (dcfg->auditlog_fileperms == NOT_SET) dcfg->auditlog_fileperms = CREATEMODE; @@ -1291,6 +1295,21 @@ static const char *cmd_audit_log_type(cmd_parms *cmd, void *_dcfg, return NULL; } +static const char *cmd_audit_log_mode(cmd_parms *cmd, void *_dcfg, + const char *p1) +{ + directory_config *dcfg = _dcfg; + + if (strcasecmp(p1, "JSON") == 0) dcfg->auditlog_format = AUDITLOGFORMAT_JSON; + else + if (strcasecmp(p1, "Native") == 0) dcfg->auditlog_format = AUDITLOGFORMAT_NATIVE; + else + return (const char *)apr_psprintf(cmd->pool, + "ModSecurity: Unrecognised parameter value for SecAuditLogFormat: %s", p1); + + return NULL; +} + static const char *cmd_audit_log_dirmode(cmd_parms *cmd, void *_dcfg, const char *p1) { @@ -3232,6 +3251,14 @@ const command_rec module_directives[] = { "whether to use the old audit log format (Serial) or new (Concurrent)" ), + AP_INIT_TAKE1 ( + "SecAuditLogFormat", + cmd_audit_log_mode, + NULL, + CMD_SCOPE_ANY, + "whether to emit audit log data in native format or JSON" + ), + AP_INIT_TAKE1 ( "SecAuditLogStorageDir", cmd_audit_log_storage_dir, diff --git a/apache2/modsecurity.h b/apache2/modsecurity.h index 04b96a8c1c..bc015f9345 100644 --- a/apache2/modsecurity.h +++ b/apache2/modsecurity.h @@ -519,6 +519,9 @@ struct directory_config { /* AUDITLOG_SERIAL (single file) or AUDITLOG_CONCURRENT (multiple files) */ int auditlog_type; + /* AUDITLOGFORMAT_NATIVE or 
AUDITLOGFORMAT_JSON */ + int auditlog_format; + /* Mode for audit log directories and files */ apr_fileperms_t auditlog_dirperms; apr_fileperms_t auditlog_fileperms; diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 2fe63142ee..a1911f65ea 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -25,10 +25,8 @@ #include "apr_version.h" #include -#ifdef WITH_JSON_LOGGING #include #include "msc_logging_json.h" -#endif /** * Write the supplied data to the audit log (if the FD is ready), update @@ -386,7 +384,6 @@ static void sec_auditlog_write_producer_header(modsec_rec *msr) { sec_auditlog_write(msr, ".\n", 2); } -#ifdef WITH_JSON_LOGGING /** * Ouput the Producer header into a JSON generator */ @@ -418,7 +415,6 @@ static void sec_auditlog_write_producer_header_json(modsec_rec *msr, yajl_gen g) yajl_gen_array_close(g); // array for producers is finished } -#endif /* * \brief This function will returns the next chain node @@ -519,7 +515,6 @@ static int chained_is_matched(modsec_rec *msr, const msre_rule *next_rule) { return 0; } -#ifdef WITH_JSON_LOGGING /** * Write detailed information about performance metrics into a JSON generator */ @@ -600,12 +595,11 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) yajl_gen_map_close(g); } -#endif -/** - * Produce an audit log entry. +/* + * Produce an audit log entry in JSON format. 
*/ -void sec_audit_logger(modsec_rec *msr) { +void sec_audit_logger_json(modsec_rec *msr) { const apr_array_header_t *arr = NULL; apr_table_entry_t *te = NULL; const apr_array_header_t *tarr_pattern = NULL; @@ -623,15 +617,8 @@ void sec_audit_logger(modsec_rec *msr) { char *buf = NULL, *pat = NULL; msc_parm *mparm = NULL; int arg_min, arg_max, sanitize_matched; -#ifdef WITH_JSON_LOGGING yajl_gen g; int been_opened = 0; // helper flag for conditionally opening maps -#endif - -#ifndef WITH_JSON_LOGGING - /* the boundary is used by both audit log types */ - msr->new_auditlog_boundary = create_auditlog_boundary(msr->r); -#endif /* Return silently if we don't have a request line. This * means we will not be logging request timeouts. @@ -719,7 +706,6 @@ void sec_audit_logger(modsec_rec *msr) { } } -#ifdef WITH_JSON_LOGGING /** * allocate the buffer for the JSON generator * passing null will force yajl to use malloc/realloc/free @@ -734,20 +720,8 @@ void sec_audit_logger(modsec_rec *msr) { yajl_gen_config(g, yajl_gen_beautify, 0); yajl_gen_map_open(g); // IT BEGINS -#endif - /* AUDITLOG_PART_HEADER */ -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_HEADER); - sec_auditlog_write(msr, text, strlen(text)); - /* Format: time transaction_id remote_addr remote_port local_addr local_port */ - - text = apr_psprintf(msr->mp, "[%s] %s %s %u %s %u", - current_logtime(msr->mp), msr->txid, msr->remote_addr, msr->remote_port, - msr->local_addr, msr->local_port); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "transaction"); yajl_gen_map_open(g); // transaction top-level key @@ -762,29 +736,15 @@ void sec_audit_logger(modsec_rec *msr) { yajl_string(g, "request"); yajl_gen_map_open(g); // request top-level key -#endif /* AUDITLOG_PART_REQUEST_HEADERS */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_REQUEST_HEADERS) != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, 
"\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_HEADERS); - sec_auditlog_write(msr, text, strlen(text)); -#endif - sanitize_request_line(msr); -#ifndef WITH_JSON_LOGGING - sec_auditlog_write(msr, msr->request_line, strlen(msr->request_line)); - sec_auditlog_write(msr, "\n", 1); -#else - // Request Line doesn't get its own map for now. should it? yajl_kv_string(g, "request_line", msr->request_line); -#endif -#ifdef WITH_JSON_LOGGING yajl_string(g, "headers"); yajl_gen_map_open(g); // separate map for request headers -#endif arr = apr_table_elts(msr->request_headers); te = (apr_table_entry_t *)arr->elts; @@ -796,11 +756,9 @@ void sec_audit_logger(modsec_rec *msr) { sanitized_partial = 0; sanitize_matched = 0; text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); -#ifdef WITH_JSON_LOGGING // write the key no matter what // since sanitization only occurs on the value yajl_string(g, te[i].key); -#endif if (apr_table_get(msr->request_headers_to_sanitize, te[i].key) != NULL) { buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); for ( k = 0; k < tarr_pattern->nelts; k++) { @@ -831,33 +789,18 @@ void sec_audit_logger(modsec_rec *msr) { } if(sanitized_partial == 1 && sanitize_matched == 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); -#else yajl_string(g, buf); -#endif } else { -#ifndef WITH_JSON_LOGGING - memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); -#else memset(buf, '*', strlen(buf)); // strlen also includes the appended newline on the header yajl_string(g, buf); -#endif } - } -#ifndef WITH_JSON_LOGGING - sec_auditlog_write(msr, text, strlen(text)); -#else + } else { // we diverge from the original logic a bit because we always print the key // at this no point sanitization had occured, so we just print the value - else { yajl_string(g, te[i].val); } -#endif } -#ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // request headers map is finished -#endif } /* 
AUDITLOG_PART_REQUEST_BODY */ @@ -944,13 +887,8 @@ void sec_audit_logger(modsec_rec *msr) { unsigned int chunk_offset = 0; unsigned int sanitize_offset = 0; unsigned int sanitize_length = 0; -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_BODY); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "body"); yajl_gen_array_open(g); // use an array here because we're writing in chunks -#endif for(;;) { rc = modsecurity_request_body_retrieve(msr, &chunk, -1, &my_error_msg); @@ -1009,11 +947,7 @@ void sec_audit_logger(modsec_rec *msr) { /* Write the sanitized chunk to the log * and advance to the next chunk. */ -#ifndef WITH_JSON_LOGGING - sec_auditlog_write(msr, chunk->data, chunk->length); -#else yajl_string(g, chunk->data); -#endif chunk_offset += chunk->length; } @@ -1022,9 +956,7 @@ void sec_audit_logger(modsec_rec *msr) { } } -#ifdef WITH_JSON_LOGGING yajl_gen_array_close(g); // request body chunks array is finished -#endif if (rc < 0) { msr_log(msr, 1, "Audit log: %s", my_error_msg); @@ -1045,58 +977,27 @@ void sec_audit_logger(modsec_rec *msr) { if (buffer == NULL) { msr_log(msr, 1, "Audit log: Failed to reconstruct request body."); } else { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_FAKE_REQUEST_BODY); - sec_auditlog_write(msr, text, strlen(text)); - sec_auditlog_write(msr, buffer, strlen(buffer)); -#else yajl_kv_string(g, "fake_body", buffer); -#endif } } } -#ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // request top-level key is finished yajl_string(g, "response"); yajl_gen_map_open(g); // response top-level key -#endif /* AUDITLOG_PART_A_RESPONSE_HEADERS */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_A_RESPONSE_HEADERS) != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_A_RESPONSE_HEADERS); - 
sec_auditlog_write(msr, text, strlen(text)); -#endif /* There are no response headers (or the status line) in HTTP 0.9 */ if (msr->response_headers_sent) { - if (msr->status_line != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s %s\n", msr->response_protocol, - msr->status_line); -#else - yajl_kv_string(g, "protocol", msr->response_protocol); - // as an integer, response status is easier to parse than status_line - yajl_kv_int(g, "status", (int)msr->response_status); -#endif - } else { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s %u\n", msr->response_protocol, - msr->response_status); -#else - yajl_kv_string(g, "protocol", msr->response_protocol); - yajl_kv_int(g, "status", (int)msr->response_status); -#endif - } -#ifndef WITH_JSON_LOGGING - sec_auditlog_write(msr, text, strlen(text)); -#else + yajl_kv_string(g, "protocol", msr->response_protocol); + // as an integer, response status is easier to parse than status_line + yajl_kv_int(g, "status", (int)msr->response_status); yajl_string(g, "headers"); yajl_gen_map_open(g); // separate map for response headers -#endif /* Output headers */ @@ -1110,11 +1011,9 @@ void sec_audit_logger(modsec_rec *msr) { sanitized_partial = 0; sanitize_matched = 0; text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); -#ifdef WITH_JSON_LOGGING // write the key no matter what // since sanitization only occurs on the value yajl_string(g, te[i].key); -#endif if (apr_table_get(msr->response_headers_to_sanitize, te[i].key) != NULL) { buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); @@ -1146,33 +1045,18 @@ void sec_audit_logger(modsec_rec *msr) { } if(sanitized_partial == 1 && sanitize_matched == 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); -#else yajl_string(g, buf); -#endif } else { -#ifndef WITH_JSON_LOGGING - memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); -#else memset(buf, '*', strlen(buf)); yajl_string(g, buf); 
-#endif } - } -#ifndef WITH_JSON_LOGGING - sec_auditlog_write(msr, text, strlen(text)); -#else - // we diverge from the original logic a bit because we always print the key - // at this point no sanitization had occured, so we just print the value - else { + } else { + // we diverge from the original logic a bit because we always print the key + // at this point no sanitization had occured, so we just print the value yajl_string(g, te[i].val); } -#endif } -#ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // response headers map is finised -#endif } } @@ -1182,158 +1066,84 @@ void sec_audit_logger(modsec_rec *msr) { if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_RESPONSE_BODY) != NULL) { if (msr->resbody_data != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_RESPONSE_BODY); - sec_auditlog_write(msr, text, strlen(text)); - sec_auditlog_write(msr, msr->resbody_data, msr->resbody_length); -#else yajl_kv_string(g, "body", msr->resbody_data); -#endif wrote_response_body = 1; } } -#ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // response top-level key is finished yajl_string(g, "audit_data"); yajl_gen_map_open(g); // audit_data top-level key -#endif /* AUDITLOG_PART_TRAILER */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_TRAILER) != NULL) { apr_time_t now = apr_time_now(); -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_TRAILER); - sec_auditlog_write(msr, text, strlen(text)); -#endif /* Messages */ -#ifdef WITH_JSON_LOGGING been_opened = 0; if (msr->alerts->nelts > 0) { yajl_string(g, "messages"); yajl_gen_array_open(g); been_opened = 1; } -#endif for(i = 0; i < msr->alerts->nelts; i++) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Message: %s\n", ((char **)msr->alerts->elts)[i]); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, ((char **)msr->alerts->elts)[i]); -#endif } 
-#ifdef WITH_JSON_LOGGING if (been_opened == 1) { yajl_gen_array_close(g); } -#endif /* Apache error messages */ -#ifdef WITH_JSON_LOGGING been_opened = 0; if (msr->error_messages->nelts > 0) { yajl_string(g, "error_messages"); yajl_gen_array_open(g); been_opened = 1; } -#endif for(i = 0; i < msr->error_messages->nelts; i++) { error_message_t *em = (((error_message_t **)msr->error_messages->elts)[i]); -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Apache-Error: %s\n", - format_error_log_message(msr->mp, em)); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, format_error_log_message(msr->mp, em)); -#endif } -#ifdef WITH_JSON_LOGGING if (been_opened == 1) { yajl_gen_array_close(g); } -#endif /* Action */ if (msr->was_intercepted) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Action: Intercepted (phase %d)\n", msr->intercept_phase); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "action"); yajl_gen_map_open(g); yajl_kv_bool(g, "intercepted", 1); yajl_kv_int(g, "phase", msr->intercept_phase); yajl_kv_string(g, "message", msr->intercept_message); yajl_gen_map_close(g); -#endif } /* Apache-Handler */ if (msr->r->handler != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Apache-Handler: %s\n", msr->r->handler); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_string(g, "handler", msr->r->handler); -#endif } -#ifndef WITH_JSON_LOGGING - /* Stopwatch; left in for compatibility reasons */ - text = apr_psprintf(msr->mp, "Stopwatch: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT " (- - -)\n", - msr->request_time, (now - msr->request_time)); - sec_auditlog_write(msr, text, strlen(text)); -#endif /* Stopwatch2 */ -#ifndef WITH_JSON_LOGGING - { - char *perf_all = format_all_performance_variables(msr, msr->mp); - - text = apr_psprintf(msr->mp, "Stopwatch2: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT - "; %s\n", msr->request_time, (now - msr->request_time), perf_all); - sec_auditlog_write(msr, 
text, strlen(text)); - } -#else format_performance_variables_json(msr, g); -#endif /* Our response body does not contain chunks */ /* ENH Only write this when the output was chunked. */ /* ENH Add info when request body was decompressed, dechunked too. */ if (wrote_response_body) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Response-Body-Transformed: Dechunked\n"); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_bool(g, "response_body_dechunked", 1); -#endif } -#ifndef WITH_JSON_LOGGING - sec_auditlog_write_producer_header(msr); -#else sec_auditlog_write_producer_header_json(msr, g); -#endif /* Server */ if (msr->server_software != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Server: %s\n", msr->server_software); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_string(g, "server", msr->server_software); -#endif } -#ifdef WITH_JSON_LOGGING been_opened = 0; -#endif - /* Sanitised arguments */ { const apr_array_header_t *tarr; @@ -1343,10 +1153,6 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Sanitised-Args: "); - sec_auditlog_write(msr, text, strlen(text)); -#else if (been_opened == 0) { yajl_string(g, "sanitized"); yajl_gen_map_open(g); @@ -1355,25 +1161,16 @@ void sec_audit_logger(modsec_rec *msr) { yajl_string(g, "args"); yajl_gen_array_open(g); -#endif } for(i = 0; i < tarr->nelts; i++) { msc_arg *arg = (msc_arg *)telts[i].val; -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), - log_escape(msr->mp, arg->name), ((i == (tarr->nelts - 1)) ? 
".\n" : "")); - sec_auditlog_write(msr, text, strlen(text)); -#else // yay arrays actually make it easier here yajl_string(g, log_escape(msr->mp, arg->name)); -#endif } -#ifdef WITH_JSON_LOGGING if (tarr->nelts > 0) { yajl_gen_array_close(g); } -#endif } /* Sanitised request headers */ @@ -1385,10 +1182,6 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Sanitised-Request-Headers: "); - sec_auditlog_write(msr, text, strlen(text)); -#else if (been_opened == 0) { yajl_string(g, "sanitized"); yajl_gen_map_open(g); @@ -1397,23 +1190,14 @@ void sec_audit_logger(modsec_rec *msr) { yajl_string(g, "request_headers"); yajl_gen_array_open(g); -#endif } for(i = 0; i < tarr->nelts; i++) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), - log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? ".\n" : "")); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, log_escape(msr->mp, telts[i].key)); -#endif } -#ifdef WITH_JSON_LOGGING if (tarr->nelts > 0) { yajl_gen_array_close(g); } -#endif } /* Sanitised response headers */ @@ -1425,10 +1209,6 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Sanitised-Response-Headers: "); - sec_auditlog_write(msr, text, strlen(text)); -#else if (been_opened == 0) { yajl_string(g, "sanitized"); yajl_gen_map_open(g); @@ -1437,42 +1217,24 @@ void sec_audit_logger(modsec_rec *msr) { yajl_string(g, "response_headers"); yajl_gen_array_open(g); -#endif } for(i = 0; i < tarr->nelts; i++) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), - log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? 
".\n" : "")); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, log_escape(msr->mp, telts[i].key)); -#endif } -#ifdef WITH_JSON_LOGGING if (tarr->nelts > 0) { yajl_gen_array_close(g); } -#endif } -#ifdef WITH_JSON_LOGGING if (been_opened == 1) { yajl_gen_map_close(g); // sanitized args map is finished } -#endif /* Web application info. */ if ( ((msr->txcfg->webappid != NULL)&&(strcmp(msr->txcfg->webappid, "default") != 0)) || (msr->sessionid != NULL) || (msr->userid != NULL)) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "WebApp-Info: \"%s\" \"%s\" \"%s\"\n", - msr->txcfg->webappid == NULL ? "-" : log_escape(msr->mp, msr->txcfg->webappid), - msr->sessionid == NULL ? "-" : log_escape(msr->mp, msr->sessionid), - msr->userid == NULL ? "-" : log_escape(msr->mp, msr->userid)); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "webapp_info"); yajl_gen_map_open(g); @@ -1487,31 +1249,18 @@ void sec_audit_logger(modsec_rec *msr) { } yajl_gen_map_close(g); -#endif } if ( ((msr->txcfg->sensor_id != NULL)&&(strcmp(msr->txcfg->sensor_id, "default") != 0))) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Sensor-Id: \"%s\"\n", - msr->txcfg->sensor_id == NULL ? "-" : log_escape(msr->mp, msr->txcfg->sensor_id)), - sec_auditlog_write(msr, text, strlen(text)); -#else if(msr->txcfg->sensor_id != NULL) { yajl_kv_string(g, "sensor_id", log_escape(msr->mp, msr->txcfg->sensor_id)); } -#endif } if (msr->txcfg->is_enabled > 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Engine-Mode: \"%s\"\n", - msr->txcfg->is_enabled == 1 ? "DETECTION_ONLY" : "ENABLED"), - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_string(g, "engine_mode", (msr->txcfg->is_enabled == 1 ? 
"DETECTION_ONLY" : "ENABLED")); -#endif } /* Rule performance time */ @@ -1523,35 +1272,20 @@ void sec_audit_logger(modsec_rec *msr) { telts = (const apr_table_entry_t*)tarr->elts; if (tarr->nelts > 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Rules-Performance-Info: "); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "rules_performance_info"); yajl_gen_map_open(g); // separate map for rule perf info -#endif } for(i = 0; i < tarr->nelts; i++) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\"%s=%s\"%s", ((i == 0) ? "" : ", "), - log_escape(msr->mp, telts[i].key), log_escape(msr->mp, telts[i].val), ((i == (tarr->nelts - 1)) ? ".\n" : "")); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_string(g, log_escape(msr->mp, telts[i].key), log_escape(msr->mp, telts[i].val)); -#endif } -#ifdef WITH_JSON_LOGGING if (tarr->nelts > 0) { yajl_gen_map_close(g); // map for rule perf info is finished } -#endif } } -#ifdef WITH_JSON_LOGGING yajl_gen_map_close(g); // audit_data top-level key is finished -#endif /* AUDITLOG_PART_UPLOADS */ if ((strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_UPLOADS) != NULL) && (msr->mpd != NULL)) { @@ -1559,67 +1293,40 @@ void sec_audit_logger(modsec_rec *msr) { unsigned int total_size = 0; int cfiles = 0; -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_UPLOADS); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "uploads"); yajl_gen_map_open(g); -#endif parts = (multipart_part **)msr->mpd->parts->elts; -#ifdef WITH_JSON_LOGGING yajl_string(g, "info"); yajl_gen_array_open(g); // separate array for upload info -#endif for(cfiles = 0; cfiles < msr->mpd->parts->nelts; cfiles++) { if (parts[cfiles]->type == MULTIPART_FILE) { if(parts[cfiles]->filename != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%d,%u,\"%s\",\"%s\"\n", cfiles+1, parts[cfiles]->tmp_file_size, 
log_escape(msr->mp, parts[cfiles]->filename), log_escape(msr->mp, parts[cfiles]->content_type ? parts[cfiles]->content_type : "")); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_kv_int(g, "file_size", parts[cfiles]->tmp_file_size); yajl_kv_string(g, "file_name", log_escape(msr->mp, parts[cfiles]->filename)); yajl_kv_string(g, "content_type", parts[cfiles]->content_type ? parts[cfiles]->content_type : ""); -#endif total_size += parts[cfiles]->tmp_file_size; } } } -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "Total,%u\n", total_size); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_gen_array_close(g); // array for upload info is finished yajl_kv_int(g, "total", total_size); yajl_gen_map_close(g); // uploads top-level key is finished -#endif } /* AUDITLOG_PART_MATCHEDRULES */ if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_MATCHEDRULES) != NULL) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_MATCHEDRULES); - sec_auditlog_write(msr, text, strlen(text)); -#else yajl_string(g, "matched_rules"); yajl_gen_array_open(g); // matched_rules top-level key -#endif /* Matched Rules */ for(i = 0; i < msr->matched_rules->nelts; i++) { rule = ((msre_rule **)msr->matched_rules->elts)[i]; if ((rule != NULL) && (rule->actionset != NULL) && rule->actionset->is_chained && (rule->chain_starter == NULL)) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\n", rule->unparsed); - sec_auditlog_write(msr, text, strlen(text)); -#else write_rule_json(msr, rule, g); -#endif do { if (rule->ruleset != NULL) { @@ -1629,50 +1336,26 @@ void sec_audit_logger(modsec_rec *msr) { present = chained_is_matched(msr,next_rule); - if (present == 0) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "#%s\n",next_rule->unparsed); -#endif - } else { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\n",next_rule->unparsed); -#endif + if (present == 1) { i++; } -#ifndef 
WITH_JSON_LOGGING - sec_auditlog_write(msr, text, strlen(text)); -#else write_rule_json(msr, next_rule, g); -#endif } } rule = next_rule; } while (rule != NULL && rule->actionset != NULL && rule->actionset->is_chained); -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n"); - sec_auditlog_write(msr, text, strlen(text)); -#endif } else { if ((rule != NULL) && (rule->actionset != NULL) && !rule->actionset->is_chained && (rule->chain_starter == NULL)) { -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "%s\n\n", rule->unparsed); - sec_auditlog_write(msr, text, strlen(text)); -#else write_rule_json(msr, rule, g); -#endif } } } -#ifdef WITH_JSON_LOGGING yajl_gen_array_close(g); // matched_rules top-level key is finished -#endif } /* AUDITLOG_PART_ENDMARKER */ -#ifndef WITH_JSON_LOGGING - text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_ENDMARKER); - sec_auditlog_write(msr, text, strlen(text)); -#else + + /* finished building JSON */ yajl_gen_map_close(g); // box it up! const unsigned char *final_buf; @@ -1682,7 +1365,6 @@ void sec_audit_logger(modsec_rec *msr) { yajl_gen_clear(g); yajl_gen_free(g); -#endif /* Return here if we were writing to a serial log * as it does not need an index file. @@ -1760,3 +1442,775 @@ void sec_audit_logger(modsec_rec *msr) { apr_file_write_full(msr->txcfg->auditlog2_fd, text, nbytes, &nbytes_written); } } + +/* + * Produce an audit log entry in native format. 
+ */ +void sec_audit_logger_native(modsec_rec *msr) { + const apr_array_header_t *arr = NULL; + apr_table_entry_t *te = NULL; + const apr_array_header_t *tarr_pattern = NULL; + const apr_table_entry_t *telts_pattern = NULL; + char *str1 = NULL, *str2 = NULL, *text = NULL; + const msre_rule *rule = NULL, *next_rule = NULL; + apr_size_t nbytes, nbytes_written; + unsigned char md5hash[APR_MD5_DIGESTSIZE]; + int was_limited = 0; + int present = 0; + int wrote_response_body = 0; + char *entry_filename, *entry_basename; + apr_status_t rc; + int i, limit, k, sanitized_partial, j; + char *buf = NULL, *pat = NULL; + msc_parm *mparm = NULL; + int arg_min, arg_max, sanitize_matched; + + /* the boundary is used by both audit log types */ + msr->new_auditlog_boundary = create_auditlog_boundary(msr->r); + + /* Return silently if we don't have a request line. This + * means we will not be logging request timeouts. + */ + if (msr->request_line == NULL) { + msr_log(msr, 4, "Audit log: Skipping request whose request_line is null."); + return; + } + + /* Also return silently if we don't have a file descriptor. */ + if (msr->txcfg->auditlog_fd == NULL) { + msr_log(msr, 4, "Audit log: Skipping request since there is nowhere to write to."); + return; + } + + if (msr->txcfg->auditlog_type != AUDITLOG_CONCURRENT) { + /* Serial logging - we already have an open file + * descriptor to write to. + */ + msr->new_auditlog_fd = msr->txcfg->auditlog_fd; + } else { + /* Concurrent logging - we need to create a brand + * new file for this request. + */ + apr_md5_init(&msr->new_auditlog_md5ctx); + + msr->new_auditlog_filename = construct_auditlog_filename(msr->mp, msr->txid); + if (msr->new_auditlog_filename == NULL) return; + + /* The audit log storage directory should be explicitly + * defined. But if it isn't try to write to the same + * directory where the index file is placed. 
Of course, + * it is *very* bad practice to allow the Apache user + * to write to the same directory where a root user is + * writing to but it's not us that's causing the problem + * and there isn't anything we can do about that. + * + * ENH Actually there is something we can do! We will make + * SecAuditStorageDir mandatory, ask the user to explicitly + * define the storage location *and* refuse to work if the + * index and the storage location are in the same folder. + */ + if (msr->txcfg->auditlog_storage_dir == NULL) { + entry_filename = file_dirname(msr->mp, msr->txcfg->auditlog_name); + } + else { + entry_filename = msr->txcfg->auditlog_storage_dir; + } + if (entry_filename == NULL) return; + + entry_filename = apr_psprintf(msr->mp, "%s%s", entry_filename, msr->new_auditlog_filename); + if (entry_filename == NULL) return; + entry_basename = file_dirname(msr->mp, entry_filename); + if (entry_basename == NULL) return; + + /* IMP1 Surely it would be more efficient to check the folders for + * the audit log repository base path in the configuration phase, to reduce + * the work we do on every request. Also, since our path depends on time, + * we could cache the time we last checked and don't check if we know + * the folder is there. + */ + rc = apr_dir_make_recursive(entry_basename, msr->txcfg->auditlog_dirperms, msr->mp); + if ((rc != APR_SUCCESS) && (rc != APR_EEXIST)) { + msr_log(msr, 1, "Audit log: Failed to create subdirectories: %s (%s)", + entry_basename, get_apr_error(msr->mp, rc)); + return; + } + + rc = apr_file_open(&msr->new_auditlog_fd, entry_filename, + APR_WRITE | APR_TRUNCATE | APR_CREATE | APR_BINARY | APR_FILE_NOCLEANUP, + msr->txcfg->auditlog_fileperms, msr->mp); + if (rc != APR_SUCCESS) { + msr_log(msr, 1, "Audit log: Failed to create file: %s (%s)", + entry_filename, get_apr_error(msr->mp, rc)); + return; + } + } + + /* Lock the mutex, but only if we are using serial format. 
*/ + if (msr->txcfg->auditlog_type != AUDITLOG_CONCURRENT) { + rc = apr_global_mutex_lock(msr->modsecurity->auditlog_lock); + if (rc != APR_SUCCESS) { + msr_log(msr, 1, "Audit log: Failed to lock global mutex: %s", + get_apr_error(msr->mp, rc)); + } + } + + + /* AUDITLOG_PART_HEADER */ + text = apr_psprintf(msr->mp, "--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_HEADER); + sec_auditlog_write(msr, text, strlen(text)); + /* Format: time transaction_id remote_addr remote_port local_addr local_port */ + + text = apr_psprintf(msr->mp, "[%s] %s %s %u %s %u", + current_logtime(msr->mp), msr->txid, msr->remote_addr, msr->remote_port, + msr->local_addr, msr->local_port); + sec_auditlog_write(msr, text, strlen(text)); + + /* AUDITLOG_PART_REQUEST_HEADERS */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_REQUEST_HEADERS) != NULL) { + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_HEADERS); + sec_auditlog_write(msr, text, strlen(text)); + + sanitize_request_line(msr); + sec_auditlog_write(msr, msr->request_line, strlen(msr->request_line)); + sec_auditlog_write(msr, "\n", 1); + + + arr = apr_table_elts(msr->request_headers); + te = (apr_table_entry_t *)arr->elts; + + tarr_pattern = apr_table_elts(msr->pattern_to_sanitize); + telts_pattern = (const apr_table_entry_t*)tarr_pattern->elts; + + for (i = 0; i < arr->nelts; i++) { + sanitized_partial = 0; + sanitize_matched = 0; + text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); + if (apr_table_get(msr->request_headers_to_sanitize, te[i].key) != NULL) { + buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); + for ( k = 0; k < tarr_pattern->nelts; k++) { + if(strncmp(telts_pattern[k].key,te[i].key,strlen(te[i].key)) ==0 ) { + mparm = (msc_parm *)telts_pattern[k].val; + if(mparm->pad_1 == -1) + sanitize_matched = 1; + pat = strstr(buf,mparm->value); + if (pat != NULL) { + j = strlen(mparm->value); + arg_min = j; + arg_max = 1; + while((*pat != 
'\0')&&(j--)) { + if(arg_max > mparm->pad_2) { + int off = strlen(mparm->value) - arg_max; + int pos = mparm->pad_1-1; + if(off > pos) { + *pat = '*'; + } + } + arg_max++; + arg_min--; + pat++; + } + sanitized_partial = 1; + } + } + } + + if(sanitized_partial == 1 && sanitize_matched == 0) { + text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); + } else { + memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); + } + } + sec_auditlog_write(msr, text, strlen(text)); + } + } + + /* AUDITLOG_PART_REQUEST_BODY */ + + /* Output this part of it was explicitly requested (C) or if it was the faked + * request body that was requested (I) but we have no reason to fake it (it's + * already in the correct format). + */ + if ( (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_REQUEST_BODY) != NULL) + || ( (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_FAKE_REQUEST_BODY) != NULL) + && (msr->mpd == NULL) ) ) + { + if (msr->msc_reqbody_read) { + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + apr_array_header_t *sorted_args; + unsigned int offset = 0, last_offset = 0; + msc_arg *nextarg = NULL; + int sanitize = 0; /* IMP1 Use constants for "sanitize" values. */ + char *my_error_msg = NULL; + + sorted_args = apr_array_make(msr->mp, 25, sizeof(const msc_arg *)); + + /* First we need to sort the arguments that need to be + * sanitized in descending order (we are using a stack structure + * to store then so the order will be ascending when we start + * popping them out). This is because we will + * be reading the request body sequentially and must + * sanitize it as we go. + */ + + for(;;) { + nextarg = NULL; + + /* Find the next largest offset (excluding + * the ones we've used up already). 
+ */ + tarr = apr_table_elts(msr->arguments_to_sanitize); + telts = (const apr_table_entry_t*)tarr->elts; + for(i = 0; i < tarr->nelts; i++) { + msc_arg *arg = (msc_arg *)telts[i].val; + if (arg->origin != NULL && + strcmp(arg->origin, "BODY") != 0) + continue; + + if (last_offset == 0) { /* The first time we're here. */ + if (arg->value_origin_offset > offset) { + offset = arg->value_origin_offset; + nextarg = arg; + } + } else { /* Not the first time. */ + if ((arg->value_origin_offset > offset) + &&(arg->value_origin_offset < last_offset)) + { + offset = arg->value_origin_offset; + nextarg = arg; + } + } + } + + /* If we don't have the next argument that means + * we're done here. + */ + if (nextarg == NULL) break; + + sanitize = 2; /* Means time to pop the next argument out. */ + last_offset = offset; + offset = 0; + { /* IMP1 Fix this ugly bit here. */ + msc_arg **x = apr_array_push(sorted_args); + *x = nextarg; + } + } + + /* Now start retrieving the body chunk by chunk and + * sanitize data in pieces. + */ + + rc = modsecurity_request_body_retrieve_start(msr, &my_error_msg); + if (rc < 0) { + msr_log(msr, 1, "Audit log: %s", my_error_msg); + } else { + msc_data_chunk *chunk = NULL; + unsigned int chunk_offset = 0; + unsigned int sanitize_offset = 0; + unsigned int sanitize_length = 0; + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_REQUEST_BODY); + sec_auditlog_write(msr, text, strlen(text)); + + for(;;) { + rc = modsecurity_request_body_retrieve(msr, &chunk, -1, &my_error_msg); + if (chunk != NULL) { + /* Anything greater than 1 means we have more data to sanitize. */ + while (sanitize > 1) { + msc_arg **arg = NULL; + + if (sanitize == 2) { + /* Get the next argument from the stack. */ + arg = (msc_arg **)apr_array_pop(sorted_args); + if (arg == NULL) sanitize = 0; /* We're done sanitising. */ + else { + /* Continue with sanitation to process the + * retrieved argument. 
+ */ + sanitize = 1; + sanitize_offset = (*arg)->value_origin_offset; + sanitize_length = (*arg)->value_origin_len; + } + } + + if (sanitize) { + /* Check if the data we want to sanitize is + * stored in the current chunk. + */ + if (chunk_offset + chunk->length > sanitize_offset) { + unsigned int soff; /* data offset within chunk */ + unsigned int len; /* amount in this chunk to sanitize */ + + soff = sanitize_offset - chunk_offset; + + if (soff + sanitize_length <= chunk->length) { + /* The entire argument resides in the current chunk. */ + len = sanitize_length; + sanitize = 2; /* Get another parameter to sanitize. */ + } else { + /* Some work to do here but we'll need to seek + * another chunk. + */ + len = chunk->length - soff; + sanitize_offset += len; + sanitize_length -= len; + sanitize = 1; /* It's OK to go to the next chunk. */ + } + + /* Yes, we actually write over the original data. + * We shouldn't be needing it any more. + */ + if (soff + len <= chunk->length) { /* double check */ + memset((char *)chunk->data + soff, '*', len); + } + } + } + } + + /* Write the sanitized chunk to the log + * and advance to the next chunk. 
*/ + sec_auditlog_write(msr, chunk->data, chunk->length); + chunk_offset += chunk->length; + } + + if (rc <= 0) { + break; + } + } + + if (rc < 0) { + msr_log(msr, 1, "Audit log: %s", my_error_msg); + } + + modsecurity_request_body_retrieve_end(msr); + } + } + } + + /* AUDITLOG_PART_FAKE_REQUEST_BODY */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_FAKE_REQUEST_BODY) != NULL) { + if ((msr->msc_reqbody_read)&&(msr->mpd != NULL)) { + char *buffer = NULL; + + buffer = multipart_reconstruct_urlencoded_body_sanitize(msr); + if (buffer == NULL) { + msr_log(msr, 1, "Audit log: Failed to reconstruct request body."); + } else { + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_FAKE_REQUEST_BODY); + sec_auditlog_write(msr, text, strlen(text)); + sec_auditlog_write(msr, buffer, strlen(buffer)); + } + } + } + + /* AUDITLOG_PART_A_RESPONSE_HEADERS */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_A_RESPONSE_HEADERS) != NULL) { + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_A_RESPONSE_HEADERS); + sec_auditlog_write(msr, text, strlen(text)); + + /* There are no response headers (or the status line) in HTTP 0.9 */ + if (msr->response_headers_sent) { + if (msr->status_line != NULL) { + text = apr_psprintf(msr->mp, "%s %s\n", msr->response_protocol, + msr->status_line); + } else { + text = apr_psprintf(msr->mp, "%s %u\n", msr->response_protocol, + msr->response_status); + } + sec_auditlog_write(msr, text, strlen(text)); + + /* Output headers */ + + arr = apr_table_elts(msr->response_headers); + te = (apr_table_entry_t *)arr->elts; + + tarr_pattern = apr_table_elts(msr->pattern_to_sanitize); + telts_pattern = (const apr_table_entry_t*)tarr_pattern->elts; + + for (i = 0; i < arr->nelts; i++) { + sanitized_partial = 0; + sanitize_matched = 0; + text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, te[i].val); + if (apr_table_get(msr->response_headers_to_sanitize, te[i].key) != NULL) { 
+ buf = apr_psprintf(msr->mp, "%s",text+strlen(te[i].key)+2); + + for ( k = 0; k < tarr_pattern->nelts; k++) { + if(strncmp(telts_pattern[k].key,te[i].key,strlen(te[i].key)) ==0 ) { + mparm = (msc_parm *)telts_pattern[k].val; + if(mparm->pad_1 == -1) + sanitize_matched = 1; + pat = strstr(buf,mparm->value); + if (pat != NULL) { + j = strlen(mparm->value); + arg_min = j; + arg_max = 1; + while((*pat != '\0')&&(j--)) { + if(arg_max > mparm->pad_2) { + int off = strlen(mparm->value) - arg_max; + int pos = mparm->pad_1-1; + if(off > pos) { + *pat = '*'; + } + } + arg_max++; + arg_min--; + pat++; + } + sanitized_partial = 1; + } + } + } + + if(sanitized_partial == 1 && sanitize_matched == 0) { + text = apr_psprintf(msr->mp, "%s: %s\n", te[i].key, buf); + } else { + memset(text + strlen(te[i].key) + 2, '*', strlen(te[i].val)); + } + } + sec_auditlog_write(msr, text, strlen(text)); + } + } + } + + apr_table_clear(msr->pattern_to_sanitize); + + /* AUDITLOG_PART_RESPONSE_BODY */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_RESPONSE_BODY) != NULL) { + if (msr->resbody_data != NULL) { + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_RESPONSE_BODY); + sec_auditlog_write(msr, text, strlen(text)); + sec_auditlog_write(msr, msr->resbody_data, msr->resbody_length); + wrote_response_body = 1; + } + } + + /* AUDITLOG_PART_TRAILER */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_TRAILER) != NULL) { + apr_time_t now = apr_time_now(); + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_TRAILER); + sec_auditlog_write(msr, text, strlen(text)); + + /* Messages */ + for(i = 0; i < msr->alerts->nelts; i++) { + text = apr_psprintf(msr->mp, "Message: %s\n", ((char **)msr->alerts->elts)[i]); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Apache error messages */ + for(i = 0; i < msr->error_messages->nelts; i++) { + error_message_t *em = (((error_message_t 
**)msr->error_messages->elts)[i]); + text = apr_psprintf(msr->mp, "Apache-Error: %s\n", + format_error_log_message(msr->mp, em)); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Action */ + if (msr->was_intercepted) { + text = apr_psprintf(msr->mp, "Action: Intercepted (phase %d)\n", msr->intercept_phase); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Apache-Handler */ + if (msr->r->handler != NULL) { + text = apr_psprintf(msr->mp, "Apache-Handler: %s\n", msr->r->handler); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Stopwatch; left in for compatibility reasons */ + text = apr_psprintf(msr->mp, "Stopwatch: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT " (- - -)\n", + msr->request_time, (now - msr->request_time)); + sec_auditlog_write(msr, text, strlen(text)); + + /* Stopwatch2 */ + { + char *perf_all = format_all_performance_variables(msr, msr->mp); + + text = apr_psprintf(msr->mp, "Stopwatch2: %" APR_TIME_T_FMT " %" APR_TIME_T_FMT + "; %s\n", msr->request_time, (now - msr->request_time), perf_all); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Our response body does not contain chunks */ + /* ENH Only write this when the output was chunked. */ + /* ENH Add info when request body was decompressed, dechunked too. 
*/ + if (wrote_response_body) { + text = apr_psprintf(msr->mp, "Response-Body-Transformed: Dechunked\n"); + sec_auditlog_write(msr, text, strlen(text)); + } + + sec_auditlog_write_producer_header(msr); + + /* Server */ + if (msr->server_software != NULL) { + text = apr_psprintf(msr->mp, "Server: %s\n", msr->server_software); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Sanitised arguments */ + { + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + + tarr = apr_table_elts(msr->arguments_to_sanitize); + telts = (const apr_table_entry_t*)tarr->elts; + + if (tarr->nelts > 0) { + text = apr_psprintf(msr->mp, "Sanitised-Args: "); + sec_auditlog_write(msr, text, strlen(text)); + } + + for(i = 0; i < tarr->nelts; i++) { + msc_arg *arg = (msc_arg *)telts[i].val; + text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), + log_escape(msr->mp, arg->name), ((i == (tarr->nelts - 1)) ? ".\n" : "")); + sec_auditlog_write(msr, text, strlen(text)); + } + } + + /* Sanitised request headers */ + { + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + + tarr = apr_table_elts(msr->request_headers_to_sanitize); + telts = (const apr_table_entry_t*)tarr->elts; + + if (tarr->nelts > 0) { + text = apr_psprintf(msr->mp, "Sanitised-Request-Headers: "); + sec_auditlog_write(msr, text, strlen(text)); + } + + for(i = 0; i < tarr->nelts; i++) { + text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), + log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? 
".\n" : "")); + sec_auditlog_write(msr, text, strlen(text)); + } + } + + /* Sanitised response headers */ + { + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + + tarr = apr_table_elts(msr->response_headers_to_sanitize); + telts = (const apr_table_entry_t*)tarr->elts; + + if (tarr->nelts > 0) { + text = apr_psprintf(msr->mp, "Sanitised-Response-Headers: "); + sec_auditlog_write(msr, text, strlen(text)); + } + + for(i = 0; i < tarr->nelts; i++) { + text = apr_psprintf(msr->mp, "%s\"%s\"%s", ((i == 0) ? "" : ", "), + log_escape(msr->mp, telts[i].key), ((i == (tarr->nelts - 1)) ? ".\n" : "")); + sec_auditlog_write(msr, text, strlen(text)); + } + } + + /* Web application info. */ + if ( ((msr->txcfg->webappid != NULL)&&(strcmp(msr->txcfg->webappid, "default") != 0)) + || (msr->sessionid != NULL) || (msr->userid != NULL)) + { + text = apr_psprintf(msr->mp, "WebApp-Info: \"%s\" \"%s\" \"%s\"\n", + msr->txcfg->webappid == NULL ? "-" : log_escape(msr->mp, msr->txcfg->webappid), + msr->sessionid == NULL ? "-" : log_escape(msr->mp, msr->sessionid), + msr->userid == NULL ? "-" : log_escape(msr->mp, msr->userid)); + sec_auditlog_write(msr, text, strlen(text)); + } + + if ( ((msr->txcfg->sensor_id != NULL)&&(strcmp(msr->txcfg->sensor_id, "default") != 0))) + { + text = apr_psprintf(msr->mp, "Sensor-Id: \"%s\"\n", + msr->txcfg->sensor_id == NULL ? "-" : log_escape(msr->mp, msr->txcfg->sensor_id)), + sec_auditlog_write(msr, text, strlen(text)); + } + + + if (msr->txcfg->is_enabled > 0) { + text = apr_psprintf(msr->mp, "Engine-Mode: \"%s\"\n", + msr->txcfg->is_enabled == 1 ? 
"DETECTION_ONLY" : "ENABLED"), + sec_auditlog_write(msr, text, strlen(text)); + } + + /* Rule performance time */ + if(msr->txcfg->max_rule_time > 0) { + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + + tarr = apr_table_elts(msr->perf_rules); + telts = (const apr_table_entry_t*)tarr->elts; + + if (tarr->nelts > 0) { + text = apr_psprintf(msr->mp, "Rules-Performance-Info: "); + sec_auditlog_write(msr, text, strlen(text)); + } + + for(i = 0; i < tarr->nelts; i++) { + text = apr_psprintf(msr->mp, "%s\"%s=%s\"%s", ((i == 0) ? "" : ", "), + log_escape(msr->mp, telts[i].key), log_escape(msr->mp, telts[i].val), ((i == (tarr->nelts - 1)) ? ".\n" : "")); + sec_auditlog_write(msr, text, strlen(text)); + } + } + } + + /* AUDITLOG_PART_UPLOADS */ + if ((strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_UPLOADS) != NULL) && (msr->mpd != NULL)) { + multipart_part **parts = NULL; + unsigned int total_size = 0; + int cfiles = 0; + + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_UPLOADS); + sec_auditlog_write(msr, text, strlen(text)); + + parts = (multipart_part **)msr->mpd->parts->elts; + for(cfiles = 0; cfiles < msr->mpd->parts->nelts; cfiles++) { + if (parts[cfiles]->type == MULTIPART_FILE) { + if(parts[cfiles]->filename != NULL) { + text = apr_psprintf(msr->mp, "%d,%u,\"%s\",\"%s\"\n", cfiles+1, parts[cfiles]->tmp_file_size, log_escape(msr->mp, parts[cfiles]->filename), log_escape(msr->mp, parts[cfiles]->content_type ? 
parts[cfiles]->content_type : "")); + sec_auditlog_write(msr, text, strlen(text)); + total_size += parts[cfiles]->tmp_file_size; + } + } + } + text = apr_psprintf(msr->mp, "Total,%u\n", total_size); + sec_auditlog_write(msr, text, strlen(text)); + } + + /* AUDITLOG_PART_MATCHEDRULES */ + + if (strchr(msr->txcfg->auditlog_parts, AUDITLOG_PART_MATCHEDRULES) != NULL) { + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_MATCHEDRULES); + sec_auditlog_write(msr, text, strlen(text)); + + /* Matched Rules */ + + for(i = 0; i < msr->matched_rules->nelts; i++) { + rule = ((msre_rule **)msr->matched_rules->elts)[i]; + if ((rule != NULL) && (rule->actionset != NULL) && rule->actionset->is_chained && (rule->chain_starter == NULL)) { + text = apr_psprintf(msr->mp, "%s\n", rule->unparsed); + sec_auditlog_write(msr, text, strlen(text)); + do { + if (rule->ruleset != NULL) { + + next_rule = return_chained_rule(rule,msr); + + if (next_rule != NULL) { + + present = chained_is_matched(msr,next_rule); + + if (present == 0) { + text = apr_psprintf(msr->mp, "#%s\n",next_rule->unparsed); + } else { + text = apr_psprintf(msr->mp, "%s\n",next_rule->unparsed); + i++; + } + sec_auditlog_write(msr, text, strlen(text)); + } + } + rule = next_rule; + } while (rule != NULL && rule->actionset != NULL && rule->actionset->is_chained); + text = apr_psprintf(msr->mp, "\n"); + sec_auditlog_write(msr, text, strlen(text)); + } else { + if ((rule != NULL) && (rule->actionset != NULL) && !rule->actionset->is_chained && (rule->chain_starter == NULL)) { + text = apr_psprintf(msr->mp, "%s\n\n", rule->unparsed); + sec_auditlog_write(msr, text, strlen(text)); + } + } + } + } + /* AUDITLOG_PART_ENDMARKER */ + text = apr_psprintf(msr->mp, "\n--%s-%c--\n", msr->new_auditlog_boundary, AUDITLOG_PART_ENDMARKER); + sec_auditlog_write(msr, text, strlen(text)); + + /* Return here if we were writing to a serial log + * as it does not need an index file. 
+ */ + if (msr->txcfg->auditlog_type != AUDITLOG_CONCURRENT) { + sec_auditlog_write(msr, "\n", 1); + + /* Unlock the mutex we used to serialise access to the audit log file. */ + rc = apr_global_mutex_unlock(msr->modsecurity->auditlog_lock); + if (rc != APR_SUCCESS) { + msr_log(msr, 1, "Audit log: Failed to unlock global mutex: %s", + get_apr_error(msr->mp, rc)); + } + + return; + } + + /* From here on only concurrent-style processing. */ + + /* File handle might already be closed after write failure. */ + if (msr->new_auditlog_fd) { + apr_file_close(msr->new_auditlog_fd); + } + + /* Write an entry to the index file */ + + /* Calculate hash of the entry. */ + apr_md5_final(md5hash, &msr->new_auditlog_md5ctx); + + str2 = apr_psprintf(msr->mp, "%s %d %d md5:%s", msr->new_auditlog_filename, 0, + msr->new_auditlog_size, bytes2hex(msr->mp, md5hash, 16)); + if (str2 == NULL) return; + + /* We do not want the index line to be longer than 3980 bytes. */ + limit = 3980; + was_limited = 0; + + /* If we are logging to a pipe we need to observe and + * obey the pipe atomic write limit - PIPE_BUF. For + * more details see the discussion in sec_guardian_logger code. 
+ */ + if (msr->txcfg->auditlog_name[0] == '|') { + if (PIPE_BUF < limit) { + limit = PIPE_BUF; + } + } + + limit = limit - strlen(str2) - 5; + if (limit <= 0) { + msr_log(msr, 1, "Audit Log: Atomic PIPE write buffer too small: %d", PIPE_BUF); + return; + } + + str1 = construct_log_vcombinedus_limited(msr, limit, &was_limited); + if (str1 == NULL) return; + + if (was_limited == 0) { + text = apr_psprintf(msr->mp, "%s %s \n", str1, str2); + } else { + text = apr_psprintf(msr->mp, "%s %s L\n", str1, str2); + } + if (text == NULL) return; + + nbytes = strlen(text); + if (msr->txcfg->debuglog_level >= 9) { + msr_log(msr, 9, "Audit Log: Writing %" APR_SIZE_T_FMT " bytes to primary concurrent index", nbytes); + } + apr_file_write_full(msr->txcfg->auditlog_fd, text, nbytes, &nbytes_written); + + /* Write to the secondary audit log if we have one */ + if (msr->txcfg->auditlog2_fd != NULL) { + if (msr->txcfg->debuglog_level >= 9) { + msr_log(msr, 9, "Audit Log: Writing %" APR_SIZE_T_FMT " bytes to secondary concurrent index", nbytes); + } + apr_file_write_full(msr->txcfg->auditlog2_fd, text, nbytes, &nbytes_written); + } +} + +/* + * Handler for audit log writers. 
+ */ +void sec_audit_logger(modsec_rec *msr) { + if (msr->txcfg->auditlog_format == AUDITLOGFORMAT_JSON) { + sec_audit_logger_json(msr); + } else { + sec_audit_logger_native(msr); + } +} diff --git a/apache2/msc_logging.h b/apache2/msc_logging.h index 75af9da7ae..9b1b48853a 100644 --- a/apache2/msc_logging.h +++ b/apache2/msc_logging.h @@ -22,6 +22,9 @@ #define AUDITLOG_SERIAL 0 #define AUDITLOG_CONCURRENT 1 +#define AUDITLOGFORMAT_JSON 0 +#define AUDITLOGFORMAT_NATIVE 1 + #define AUDITLOG_PART_FIRST 'A' #define AUDITLOG_PART_HEADER 'A' #define AUDITLOG_PART_REQUEST_HEADERS 'B' diff --git a/configure.ac b/configure.ac index bb2e42e34a..b188cb11e4 100644 --- a/configure.ac +++ b/configure.ac @@ -275,22 +275,6 @@ if test "$build_docs" -eq 1; then TOPLEVEL_SUBDIRS="$TOPLEVEL_SUBDIRS docs" fi -# Add JSON audit logging -AC_ARG_ENABLE(json-logging, - AS_HELP_STRING([--enabled-json-logging], - [Enable JSON audit logging.]), -[ - if test "$enableval" != "no"; then - json_logging='-DWITH_JSON_LOGGING' - MODSEC_EXTRA_CFLAGS="$MODSEC_EXTRA_CFLAGS $json_logging" - else - json_logging='' - fi -], -[ - json_logging='' -]) - # Add PCRE Studying AC_ARG_ENABLE(pcre-study, @@ -674,7 +658,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$json_logging $pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" +MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" for f in $EXTRA_CFLAGS; do From 8559399ebdf4364a9ec73e3c88f92de2f48c017c Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Sun, 19 Jul 2015 00:09:37 -0700 Subject: [PATCH 039/248] Update JSON structure for matched rules Create a separate map for each matched rule chain, making it easier to 
identify chains in which only a portion of rules actually matched. --- apache2/msc_logging.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index a1911f65ea..edc771be1a 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1326,6 +1326,15 @@ void sec_audit_logger_json(modsec_rec *msr) { for(i = 0; i < msr->matched_rules->nelts; i++) { rule = ((msre_rule **)msr->matched_rules->elts)[i]; if ((rule != NULL) && (rule->actionset != NULL) && rule->actionset->is_chained && (rule->chain_starter == NULL)) { + /* + * create a separate map for each rule chain + * this makes it a lot easier to search for partial chains + */ + yajl_gen_map_open(g); // map for this chain + yajl_kv_bool(g, "chain", 1); + yajl_string(g, "rules"); + yajl_gen_array_open(g); // array for the rules + write_rule_json(msr, rule, g); do { if (rule->ruleset != NULL) { @@ -1344,10 +1353,23 @@ void sec_audit_logger_json(modsec_rec *msr) { } rule = next_rule; } while (rule != NULL && rule->actionset != NULL && rule->actionset->is_chained); + yajl_gen_array_close(g); + + yajl_kv_bool(g, "full_chain_match", present); // if one of the rules didnt match, present is set to 0 + yajl_gen_map_close(g); // close the map for this chain } else { + yajl_gen_map_open(g); + + yajl_kv_bool(g, "chain", 0); + yajl_string(g, "rules"); // this really should be 'rule', but we're keeping in line with other chain maps + + yajl_gen_array_open(g); if ((rule != NULL) && (rule->actionset != NULL) && !rule->actionset->is_chained && (rule->chain_starter == NULL)) { write_rule_json(msr, rule, g); } + yajl_gen_array_close(g); + + yajl_gen_map_close(g); } } yajl_gen_array_close(g); // matched_rules top-level key is finished From 0c95a7a2cde10e3cbe77c45f26b7a19b980f34c8 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 22 Jul 2015 13:14:00 -0700 Subject: [PATCH 040/248] Clean up JSON rule writer * Escape rule actionset metadata * Escape and 
truncate logdata * Lazily add actionset tags as an array * Add negated rule op_param * Add unparsed rule representation --- apache2/msc_logging.c | 64 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 6 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index edc771be1a..771b52b59e 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -539,26 +539,49 @@ static void format_performance_variables_json(modsec_rec *msr, yajl_gen g) { * Write detailed information about a rule and its actionset into a JSON generator */ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) { - int present = 0; + const apr_array_header_t *tarr; + const apr_table_entry_t *telts; + int been_opened = 0; + int k; yajl_gen_map_open(g); yajl_string(g, "actionset"); yajl_gen_map_open(g); if (rule->actionset->id) { - yajl_kv_string(g, "id", rule->actionset->id); + yajl_kv_string(g, "id", log_escape(msr->mp, rule->actionset->id)); } if (rule->actionset->rev) { - yajl_kv_string(g, "rev", rule->actionset->rev); + yajl_kv_string(g, "rev", log_escape(msr->mp, rule->actionset->rev)); } if (rule->actionset->msg) { - yajl_kv_string(g, "msg", rule->actionset->msg); + msc_string *var = (msc_string *)apr_palloc(msr->mp, sizeof(msc_string)); + var->value = (char *)rule->actionset->msg; + var->value_len = strlen(rule->actionset->msg); + expand_macros(msr, var, NULL, msr->mp); + + yajl_kv_string(g, "msg", log_escape_ex(msr->mp, var->value, var->value_len)); } if (rule->actionset->version) { - yajl_kv_string(g, "version", rule->actionset->version); + yajl_kv_string(g, "version", log_escape(msr->mp, rule->actionset->version)); } if (rule->actionset->logdata) { - yajl_kv_string(g, "logdata", rule->actionset->logdata); + msc_string *var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); + var->value = (char *)rule->actionset->logdata; + var->value_len = strlen(rule->actionset->logdata); + expand_macros(msr, var, NULL, 
msr->mp); + + char *logdata = apr_pstrdup(msr->mp, log_escape_hex(msr->mp, (unsigned char *)var->value, var->value_len)); + + // if it is > 512 bytes, then truncate at 512 with ellipsis. + if (strlen(logdata) > 515) { + logdata[512] = '.'; + logdata[513] = '.'; + logdata[514] = '.'; + logdata[515] = '\0'; + } + + yajl_kv_string(g, "logdata", logdata); } if (rule->actionset->severity != NOT_SET) { yajl_kv_int(g, "severity", rule->actionset->severity); @@ -576,6 +599,33 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) if (rule->actionset->is_chained && (rule->chain_starter == NULL)) { yajl_kv_bool(g, "chain_starter", 1); } + + // tags, lazily opened + tarr = apr_table_elts(rule->actionset->actions); + telts = (const apr_table_entry_t*)tarr->elts; + for (k = 0; k < tarr->nelts; k++) { + msre_action *action = (msre_action *)telts[k].val; + if (strcmp(telts[k].key, "tag") == 0) { + if (been_opened == 0) { + yajl_string(g, "tags"); + yajl_gen_array_open(g); + been_opened = 1; + } + + // expand variables in the tag + msc_string *var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); + var->value = (char *)action->param; + var->value_len = strlen(action->param); + expand_macros(msr, var, NULL, msr->mp); + + yajl_string(g, log_escape(msr->mp, var->value)); + } + } + + if (been_opened == 1) { + yajl_gen_array_close(g); + } + yajl_gen_map_close(g); yajl_string(g, "operator"); @@ -583,6 +633,7 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) yajl_kv_string(g, "operator", rule->op_name); yajl_kv_string(g, "operator_param", rule->op_param); yajl_kv_string(g, "target", rule->p1); + yajl_kv_bool(g, "negated", rule->op_negated); yajl_gen_map_close(g); yajl_string(g, "config"); @@ -591,6 +642,7 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) yajl_kv_int(g, "line_num", rule->line_num); yajl_gen_map_close(g); + yajl_kv_string(g, "unparsed", rule->unparsed); yajl_kv_bool(g, 
"is_matched", chained_is_matched(msr, rule)); yajl_gen_map_close(g); From 5bc75ec871814d3963ed86c79e71fab88c5de228 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Tue, 10 Nov 2015 11:12:42 -0800 Subject: [PATCH 041/248] Do not compile in JSON logging support if yajl is not found --- apache2/apache2_config.c | 10 ++++++++++ apache2/modsecurity.h | 2 ++ apache2/msc_logging.c | 10 ++++++++++ apache2/msc_logging.h | 2 ++ 4 files changed, 24 insertions(+) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index e38cf4e97a..3ab618a9e2 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -73,7 +73,9 @@ void *create_directory_config(apr_pool_t *mp, char *path) /* audit log variables */ dcfg->auditlog_flag = NOT_SET; dcfg->auditlog_type = NOT_SET; + #ifdef WITH_YAJL dcfg->auditlog_format = NOT_SET; + #endif dcfg->max_rule_time = NOT_SET; dcfg->auditlog_dirperms = NOT_SET; dcfg->auditlog_fileperms = NOT_SET; @@ -504,8 +506,10 @@ void *merge_directory_configs(apr_pool_t *mp, void *_parent, void *_child) merged->auditlog2_fd = parent->auditlog2_fd; merged->auditlog2_name = parent->auditlog2_name; } + #ifdef WITH_YAJL merged->auditlog_format = (child->auditlog_format == NOT_SET ? parent->auditlog_format : child->auditlog_format); + #endif merged->auditlog_storage_dir = (child->auditlog_storage_dir == NOT_SET_P ? 
parent->auditlog_storage_dir : child->auditlog_storage_dir); merged->auditlog_parts = (child->auditlog_parts == NOT_SET_P @@ -670,7 +674,9 @@ void init_directory_config(directory_config *dcfg) /* audit log variables */ if (dcfg->auditlog_flag == NOT_SET) dcfg->auditlog_flag = 0; if (dcfg->auditlog_type == NOT_SET) dcfg->auditlog_type = AUDITLOG_SERIAL; + #ifdef WITH_YAJL if (dcfg->auditlog_format == NOT_SET) dcfg->auditlog_format = AUDITLOGFORMAT_NATIVE; + #endif if (dcfg->max_rule_time == NOT_SET) dcfg->max_rule_time = 0; if (dcfg->auditlog_dirperms == NOT_SET) dcfg->auditlog_dirperms = CREATEMODE_DIR; if (dcfg->auditlog_fileperms == NOT_SET) dcfg->auditlog_fileperms = CREATEMODE; @@ -1295,6 +1301,7 @@ static const char *cmd_audit_log_type(cmd_parms *cmd, void *_dcfg, return NULL; } +#ifdef WITH_YAJL static const char *cmd_audit_log_mode(cmd_parms *cmd, void *_dcfg, const char *p1) { @@ -1309,6 +1316,7 @@ static const char *cmd_audit_log_mode(cmd_parms *cmd, void *_dcfg, return NULL; } +#endif static const char *cmd_audit_log_dirmode(cmd_parms *cmd, void *_dcfg, const char *p1) @@ -3251,6 +3259,7 @@ const command_rec module_directives[] = { "whether to use the old audit log format (Serial) or new (Concurrent)" ), +#ifdef WITH_YAJL AP_INIT_TAKE1 ( "SecAuditLogFormat", cmd_audit_log_mode, @@ -3258,6 +3267,7 @@ const command_rec module_directives[] = { CMD_SCOPE_ANY, "whether to emit audit log data in native format or JSON" ), +#endif AP_INIT_TAKE1 ( "SecAuditLogStorageDir", diff --git a/apache2/modsecurity.h b/apache2/modsecurity.h index bc015f9345..228bea0b22 100644 --- a/apache2/modsecurity.h +++ b/apache2/modsecurity.h @@ -519,8 +519,10 @@ struct directory_config { /* AUDITLOG_SERIAL (single file) or AUDITLOG_CONCURRENT (multiple files) */ int auditlog_type; +#ifdef WITH_YAJL /* AUDITLOGFORMAT_NATIVE or AUDITLOGFORMAT_JSON */ int auditlog_format; +#endif /* Mode for audit log directories and files */ apr_fileperms_t auditlog_dirperms; diff --git 
a/apache2/msc_logging.c b/apache2/msc_logging.c index 771b52b59e..6a03d8181e 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -25,8 +25,10 @@ #include "apr_version.h" #include +#ifdef WITH_YAJL #include #include "msc_logging_json.h" +#endif /** * Write the supplied data to the audit log (if the FD is ready), update @@ -384,6 +386,7 @@ static void sec_auditlog_write_producer_header(modsec_rec *msr) { sec_auditlog_write(msr, ".\n", 2); } +#ifdef WITH_YAJL /** * Ouput the Producer header into a JSON generator */ @@ -415,6 +418,7 @@ static void sec_auditlog_write_producer_header_json(modsec_rec *msr, yajl_gen g) yajl_gen_array_close(g); // array for producers is finished } +#endif /* * \brief This function will returns the next chain node @@ -515,6 +519,7 @@ static int chained_is_matched(modsec_rec *msr, const msre_rule *next_rule) { return 0; } +#ifdef WITH_YAJL /** * Write detailed information about performance metrics into a JSON generator */ @@ -1516,6 +1521,7 @@ void sec_audit_logger_json(modsec_rec *msr) { apr_file_write_full(msr->txcfg->auditlog2_fd, text, nbytes, &nbytes_written); } } +#endif /* * Produce an audit log entry in native format. @@ -2282,9 +2288,13 @@ void sec_audit_logger_native(modsec_rec *msr) { * Handler for audit log writers. 
*/ void sec_audit_logger(modsec_rec *msr) { + #ifdef WITH_YAJL if (msr->txcfg->auditlog_format == AUDITLOGFORMAT_JSON) { sec_audit_logger_json(msr); } else { + #endif sec_audit_logger_native(msr); + #ifdef WITH_YAJL } + #endif } diff --git a/apache2/msc_logging.h b/apache2/msc_logging.h index 9b1b48853a..5378ddc659 100644 --- a/apache2/msc_logging.h +++ b/apache2/msc_logging.h @@ -22,8 +22,10 @@ #define AUDITLOG_SERIAL 0 #define AUDITLOG_CONCURRENT 1 +#ifdef WITH_YAJL #define AUDITLOGFORMAT_JSON 0 #define AUDITLOGFORMAT_NATIVE 1 +#endif #define AUDITLOG_PART_FIRST 'A' #define AUDITLOG_PART_HEADER 'A' From ddc25dbbaa7183c431059784c55b887da54f3785 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Tue, 10 Nov 2015 15:54:42 -0800 Subject: [PATCH 042/248] Fix 'is_chained' value for final rule in chain 'is_chained' should be true for an actionset when the is_chained member of the struct is true, or when its rule has a valid chain_starter member. --- apache2/msc_logging.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 6a03d8181e..ee7415da8d 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -600,7 +600,7 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) if (rule->actionset->phase != NOT_SET) { yajl_kv_int(g, "phase", rule->actionset->phase); } - yajl_kv_bool(g, "is_chained", rule->actionset->is_chained); + yajl_kv_bool(g, "is_chained", rule->actionset->is_chained || (rule->chain_starter != NULL)); if (rule->actionset->is_chained && (rule->chain_starter == NULL)) { yajl_kv_bool(g, "chain_starter", 1); } From 8f8645f3d64cfa1e2b52a0766f93fdc1f51e2e55 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 11 Nov 2015 08:17:59 -0800 Subject: [PATCH 043/248] Whitespace fix for pull request --- configure.ac | 2 ++ 1 file changed, 2 insertions(+) diff --git a/configure.ac b/configure.ac index b188cb11e4..7517885893 100644 --- a/configure.ac +++ 
b/configure.ac @@ -275,6 +275,7 @@ if test "$build_docs" -eq 1; then TOPLEVEL_SUBDIRS="$TOPLEVEL_SUBDIRS docs" fi + # Add PCRE Studying AC_ARG_ENABLE(pcre-study, @@ -659,6 +660,7 @@ else fi MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" + APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" for f in $EXTRA_CFLAGS; do From 2307a8b55be9dbea740239d06c7446aad8a7c613 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Fri, 4 Dec 2015 16:43:24 -0800 Subject: [PATCH 044/248] Add JSON log parse script --- tools/parse_modsec.pl | 576 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 576 insertions(+) create mode 100755 tools/parse_modsec.pl diff --git a/tools/parse_modsec.pl b/tools/parse_modsec.pl new file mode 100755 index 0000000000..3c5db51ff6 --- /dev/null +++ b/tools/parse_modsec.pl @@ -0,0 +1,576 @@ +#!/usr/bin/perl + +use strict; +use warnings; + +use DateTime::Format::Strptime; +use Getopt::Long qw(:config no_ignore_case bundling); +use JSON; +use List::MoreUtils qw(any); +use NetAddr::IP; +use Try::Tiny; + +=pod + +=head1 NAME + +parse_modsec.pl + +=head1 SYNOPSIS + +Parse ModSecurity logs generated as JSON + +=head1 USAGE + +Usage: $0 [h] [Gtsrfdbalspj] + -H|--host Search rules based on the Host request header + -t|--transaction-id Search rules based on the unique transaction ID + -s|--source-ip Search rules based on the client IP address (can be presented as an address or CIDR block) + -r|--rule-id Search rules based on the rule ID + -f|--filter Define advanced filters to walk through JSON tree + -d|--delim Define a delimiter for advanced filters. Default is '.' + -b|--before Search rules before this timeframe + -a|--after Search rules after this timeframe + -l|--logpath Define a path to read JSON logs from. 
Default is '/var/log/modsec_audit.log' + -S|--stdin Read rules from stdin instead of an on-disk file + -p|--partial-chains Do not prune partial chain matches + -j|--json Print rule entries as a JSON blob, rather than nice formatting + -v|--verbose Be verbose about various details such as JSON parse failures + + +=head2 FILTERS + +ModSecurity JSON audit logs are written as a series of atomic JSON documents, as opposed to a single, monolithic structure. This program will read through all JSON documents provided, making certain assumptions about the structure of each document, and will print out relevent entries based on the parameters provided. Log entries can be filtered by key-value pairs; given a key at an arbitrary level in the document, test the value of the key against an expected expression. The best way to understand this is with examples (see EXAMPLES for further details). + +Filter values are treated as regular expressions. Each match is anchored by '^' and'$', meaning that values that do not contain PCRE metacharacters will essentially match by string equality. + +Filters can be used to search a specific key-pair value, or an array of values. Arrays containing sub-elements can also be traversed. Arrays are identified in a filter key expression through the use of the '%' metacharacter. See EXAMPLES for further discussion of filter key expression syntax. + +Multiple filters can be provided, and are used in a logical AND manner (that is, an entry must match all given filters). 
+ + +=head2 FILTER EXAMPLES + +Examine the following entry: + + { + "foo": "bar", + "foo2": "bar2", + "qux": { + "quux": "corge", + "grault": "garply", + "wal.do": "fred" + }, + "baz": [ + "bat", + "bam", + "bif" + ], + "bal": [ + { "hello": "world" }, + { "how": "are" }, + { "you": "doing" } + ] + } + +A search for the top level key "foo" containing the value "bar" would look like: + + -f foo=bar + +However, the following will not result in the entry being matched: + + -f foo=bar2 + +This is because the value of "foo" in the JSON document does not match the regex "^bar2$" + +Searching sub-keys is possible by providing the traversal path as the filter key, separated by a delimiter. By default the delimiter is '.'. For example, to search the value of the "grault" subkey within the "qux" key: + + -f qux.grault= + +Search arrays is also possible with the use of the '%' metacharacter, which should be used in place of a key name in the filter expression. For example, to search through all the values in the "baz" top-level key: + + -f baz.%= + +Searching for specific keys that are live in an array is also possible. For example, to search for the value of the "hello" key within the top-level key "bal" array: + + -f bal.%.hello= + +If any key contains a period character (.), you can specify an alternative delimiter using the '-d' option. To search the "wal.do" key within "qux": + + -d @ quz@wal.do= + + +=head2 SHORTCUTS + +Quick searches of on-disk log files likely will be performed using simple queries. Rather than forcing users to write a filter for common parameters, we provide a few shortcuts as options. These shortcuts can be combined with additional filters for complex searches. 
Provided shortcuts (and the matching filter key expression) are listed below: + + Host: request.headers.Host + Transaction ID: transaction.transaction_id + Rule ID: matched_rules.%.rules.%.actionset.id + +Additionally, the '--source-ip' argument allows for searching rule entries based on the remote IP address. This option searches based on CIDR blocks, instead of the filter searching described above. + +=head2 TIMEFRAME + +Log entries can further be narrowed by time range. The --before and --after flags can be used to return only entries that returned before or after (or both) a given date and time. Values for these options can be provided by the following syntax: + + ^\d+[dDhHmM]?$ + +For example, to limit the search of entries to between one and 4 days ago: + + -a 4d -b 1d + +You may provide one, both, or neither of these flags. + + +=head2 USAGE EXAMPLES + +Print all log entries from the default log location: + + parse_modsec.pl + +Print entries matching a specific source IP: + + parse_modsec.pl -s 1.2.3.4 + +Print entries matching a source IP in a given subnet: + + parse_modsec.pl -s 1.2.3.0/24 + +Print entries matching a given host and all its sub domains: + + parse_modsec.pl -H .*example.com + +Print entries matching a specific rule ID, that occurred within the last 12 hours: + + parse_modsec.pl -r 123456 -a 12h + +Print entries matching a given rule ID, even if that ID was present in a partial chain: + + parse_modsec.pl -r 123456 -p + +Print entries that contain an HTTP status code 403 + + parse_modsec.pl -f response.status=403 + +Print entries that contain an HTTP GET request with a 'Content-Length' header + + parse_modsec.pl -f request.headers.Content-Length=.* -f request.request_line=GET.* + +=cut + +sub usage { + print <<"_EOF"; +Usage: $0 [h] [Gtsrfdbalspj] + -h|--help Print this help + -H|--host Search rules based on the Host request header + -t|--transaction-id Search rules based on the unique transaction ID + -s|--source-ip Search rules based on the 
client IP address (can be presented as an address or CIDR block) + -r|--rule-id Search rules based on the rule ID + -f|--filter Define advanced filters to walk through JSON tree + -d|--delim Define a delimiter for advanced filters. Default is '.' + -b|--before Search rules before this timeframe + -a|--after Search rules after this timeframe + -l|--logpath Define a path to read JSON logs from. Default is '/var/log/modsec_audit.log' + -S|--stdin Read rules from stdin instead of an on-disk file + -p|--partial-chains Do not prune partial chain matches + -j|--json Print rule entries as a JSON blob, rather than nice formatting + -v|--verbose Be verbose about various details such as JSON parse failures + + For detailed explanations of various options and example usages, see 'perldoc $0' + +_EOF + exit 1; +} + +# figure the number of seconds based on the command-line option +sub parse_duration { + my ($duration) = @_; + + if ($duration =~ /^(\d+)[dD]$/) { + return $1 * 60 * 60 * 24; + } elsif ($duration =~ /^(\d+)[hH]$/) { + return $1 * 60 * 60; + } elsif ($duration =~ /^(\d+)[mM]$/) { + return $1 * 60; + } elsif ($duration =~ /^(\d+)[sS]?$/) { + return $1; + } else { + die "Couldn't parse duration $duration!\n"; + } +} + +# build a DateTime representative of the past +sub build_datetime { + my ($duration) = @_; + + return if !$duration; + return DateTime->now()->subtract(seconds => parse_duration($duration)); +} + +# determine if the log entry occurred within the given timeframe +sub within_timeframe { + my ($args) = @_; + my $entry = $args->{entry}; + my $before = $args->{before}; + my $after = $args->{after}; + my $timestamp = parse_modsec_timestamp($entry->{transaction}->{time}); + + return (defined $before ? $timestamp < $before : 1) && + (defined $after ? $timestamp > $after : 1); +} + +# sigh... 
+sub parse_modsec_timestamp { + my ($input) = @_; + + my $format = '%d/%b/%Y:%H:%M:%S -%z'; + my $locale = 'en_US'; + + my $strp = DateTime::Format::Strptime->new( + pattern => $format, + locale => $locale, + ); + + return $strp->parse_datetime($input); +} + +# figure out if we're reading from a file or stdin +# return a file handle representation of our data +sub get_input { + my ($args) = @_; + my $logpath = $args->{logpath}; + my $stdin = $args->{stdin}; + my $fh; + + $stdin ? + $fh = *STDIN : + open $fh, '<', $logpath or die $!; + + return $fh; +} + +# figure if the target address/cidr contains the entry's remote address +sub cidr_match { + my ($args) = @_; + my $entry = $args->{entry}; + my $target = $args->{target}; + my $client_ip = $entry->{transaction}->{remote_address}; + + return $target ? $target->contains(NetAddr::IP->new($client_ip)) : 1; +} + +# given a file handle, return an arrayref representing pertinent rule entries +sub grok_input { + my ($args) = @_; + my $fh = $args->{fh}; + my $filters = $args->{filters}; + my $delim = $args->{delim}; + my $source_ip = $args->{source_ip}; + my $before = $args->{before}; + my $after = $args->{after}; + my $partial = $args->{partial}; + my $verbose = $args->{verbose}; + + my @ref; + + while (my $line = <$fh>) { + my $entry; + + try { + $entry = decode_json($line); + } catch { + warn "Could not decode as JSON:\n$line\n" if $verbose; + }; + + next if !$entry; + + skim_entry({ + entry => $entry, + partial => $partial, + }); + + next if !filter({ + filters => $filters, + data => $entry, + delim => $delim, + }); + + next if !cidr_match({ + entry => $entry, + target => $source_ip, + }); + + next if !within_timeframe({ + entry => $entry, + before => $before, + after => $after, + }); + + push @ref, $entry; + } + + return \@ref; +} + +# get rid of partial chains and other noise +sub skim_entry { + my ($args) = @_; + my $entry = $args->{entry}; + my $partial = $args->{partial}; + my $ctr = 0; + + for my $matched_rule 
(@{$entry->{matched_rules}}) { + splice @{$entry->{matched_rules}}, $ctr++, 1 + if $matched_rule->{chain} && !$matched_rule->{full_chain_match} && !$partial; + } +} + +# print entries after filtering and skimming +sub print_matches { + my ($args) = @_; + my $ref = $args->{ref}; + my $json = $args->{json}; + my $verbose = $args->{verbose}; + + for my $entry (@{$ref}) { + if ($json) { + print encode_json($entry) . "\n"; + } else { + printf "\n%s\n", '=' x 80; + + # request + my $transaction = $entry->{transaction}; + my $request = $entry->{request}; + + printf "%s\nTransaction ID: %s\nIP: %s\n\n%s\n", + parse_modsec_timestamp($transaction->{time}), + $transaction->{transaction_id}, + $transaction->{remote_address}, + $request->{request_line}; + + for my $header (sort keys %{$request->{headers}}) { + printf "%s: %s\n", $header, $request->{headers}->{$header}; + } + + # matched rules + for my $chain (@{$entry->{matched_rules}}) { + print "\n"; + my @extra_data; + my $ctr = 0; + + for my $rule (@{$chain->{rules}}) { + printf $rule->{is_matched} ? 
"%s%s\n" : "%s#%s\n", ' ' x $ctr++, $rule->{unparsed}; + push @extra_data, $rule->{actionset}->{msg} if $rule->{actionset}->{msg}; + push @extra_data, $rule->{actionset}->{logdata} if $rule->{actionset}->{logdata}; + } + + printf "\n-- %s\n", join "\n-- ", @extra_data if @extra_data && $verbose; + } + + # audit message + printf "\n-- %s\n\n", $entry->{audit_data}->{action}->{message} if $verbose; + + printf "%s\n", '=' x 80; + } + } +} + +# filter out rule entries based on given filter definitions +sub filter { + my ($args) = @_; + my $filters = $args->{filters}; + my $data = $args->{data}; + my $delim = $args->{delim}; + + my $valid_match = 1; + + for my $field (keys %{$filters}) { + my $args = { + field => $field, + match => $filters->{$field}, + delim => $delim, + hash => $data, + }; + + if (!match($args)) { + $valid_match = 0; + last; + } + } + return $valid_match; +} + +# match a hash element (may be an array of elements) against a given pattern +sub match { + my ($args) = @_; + my $delim = $args->{delim}; + my $hash = $args->{hash}; + my $match = $args->{match}; + my $field = $args->{field}; + + my @matches = traverse($args); + + return any { $_ =~ m/^$match$/ } @matches; +} + +# walk a JSON structure in search of a given key +# borrowed and butchered from view_signatures.pl +sub traverse { + my ($args) = @_; + my $delim = $args->{delim}; + my $hash = $args->{hash}; + my $match = $args->{match}; + my $field = $args->{field}; + my @traverse = split /\Q$delim\E/, $field; + + my @values; + + while (my $level = shift @traverse) { + if ($level eq '%') { + # match() is called in a list context + # so if we have a bad filter expression + # we need to bail in a sensible way + return () if ref $hash ne 'ARRAY'; + + for my $subhash (@{$hash}) { + my @match = traverse({ + hash => $subhash, + delim => $delim, + match => $match, + field => join $delim, @traverse, + }); + push(@values, @match) if @match; + } + } elsif (ref $hash eq 'HASH' && defined $hash->{$level}) { + 
$hash = $hash->{$level}; + } else { + $hash = undef; + last; + } + } + + push @values, $hash if defined $hash; + return ref $hash eq 'ARRAY' ? @{$hash} : @values; +} + +# merge any custom-defined filters with shortcut options +sub merge_filters { + my ($args) = @_; + my $filters = $args->{filters}; + my $delim = $args->{delim}; + + my $lookup = { + host => [qw(request headers Host)], + transaction_id => [qw(transaction transaction_id)], + rule_id => [qw(matched_rules % rules % actionset id)] + }; + + for my $field (keys %{$lookup}) { + if (defined $args->{$field}) { + my $key = build_filter_key({ + elements => $lookup->{$field}, + delim => $delim, + }); + + $filters->{$key} = $args->{$field}; + } + } +} + +# stub sub to build a filter key +sub build_filter_key { + my ($args) = @_; + my $elements = $args->{elements}; + my $delim = $args->{delim}; + + return join $delim, @{$elements}; +} + +sub main { + my ( + $host, $transaction_id, # shortcuts + $source_ip, $rule_id, # shortcuts + %filters, $delim, # used by filters/match/traverse to grok the input + $before, $after, # timeframe + $logpath, $stdin, # input + $partial_chains, $json, # output + $verbose, # output + $fh, $parsed_ref, # data structures + ); + + GetOptions( + 'h|help' => sub { usage(); }, + 'H|host=s' => \$host, + 't|transaction-id=s' => \$transaction_id, + 's|source-ip=s' => \$source_ip, + 'r|rule-id=i' => \$rule_id, + 'f|filter=s' => \%filters, + 'd|delim=s' => \$delim, + 'b|before=s' => \$before, + 'a|after=s' => \$after, + 'l|logpath=s' => \$logpath, + 'S|stdin' => \$stdin, + 'p|partial-chains' => \$partial_chains, + 'j|json' => \$json, + 'v|verbose' => \$verbose, + ) or usage(); + + # sanity checks + die "Cannot parse both a file and stdin\n" + if defined $logpath && defined $stdin; + + if (defined $source_ip) { + $source_ip = NetAddr::IP->new($source_ip); + die "Invalid IP/CIDR provided for source IP argument\n" + unless $source_ip; + } + + # build_datetime will bail out if an invalid format was 
given + $before = build_datetime($before); + $after = build_datetime($after); + + # figure where we're reading from + $logpath ||= '/var/log/modsec_audit.log'; + $fh = get_input({ + logpath => $logpath, + stdin => $stdin, + }); + + die "Could not get a handle on your data\n" + unless $fh; + + # build the filters by merging shortcut options with custom filter directives + $delim ||= '.'; + merge_filters({ + filters => \%filters, + host => $host, + transaction_id => $transaction_id, + source_ip => $source_ip, + rule_id => $rule_id, + delim => $delim, + }); + + # walk through our input, getting an arrayref of entries valid based on filters and timeframe + $parsed_ref = grok_input({ + fh => $fh, + filters => \%filters, + delim => $delim, + source_ip => $source_ip, + before => $before, + after => $after, + partial => $partial_chains, + verbose => $verbose, + }); + + close $fh || warn $!; + + # show me the money! + print_matches({ + ref => $parsed_ref, + json => $json, + verbose => $verbose, + }); +} + +main(); From 374871e10e38ec54f0860ba6bd464ccb4e3b239d Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Mon, 7 Dec 2015 22:15:28 -0800 Subject: [PATCH 045/248] Updates to parse_modsec.pl --- tools/parse_modsec.pl | 69 ++++++++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/tools/parse_modsec.pl b/tools/parse_modsec.pl index 3c5db51ff6..8c3b043e95 100755 --- a/tools/parse_modsec.pl +++ b/tools/parse_modsec.pl @@ -22,7 +22,7 @@ =head1 SYNOPSIS =head1 USAGE -Usage: $0 [h] [Gtsrfdbalspj] +Usage: $0 [h] [Htsrfdbalspjv] -H|--host Search rules based on the Host request header -t|--transaction-id Search rules based on the unique transaction ID -s|--source-ip Search rules based on the client IP address (can be presented as an address or CIDR block) @@ -35,7 +35,7 @@ =head1 USAGE -S|--stdin Read rules from stdin instead of an on-disk file -p|--partial-chains Do not prune partial chain matches -j|--json Print rule entries as a 
JSON blob, rather than nice formatting - -v|--verbose Be verbose about various details such as JSON parse failures + -v|--verbose Be verbose about various details such as JSON parse failures and log data =head2 FILTERS @@ -129,6 +129,10 @@ =head2 USAGE EXAMPLES parse_modsec.pl +Print all log entries and show more detailed information, such as response data and matched rule details + + parse_modsec.pl -v + Print entries matching a specific source IP: parse_modsec.pl -s 1.2.3.4 @@ -161,7 +165,7 @@ =head2 USAGE EXAMPLES sub usage { print <<"_EOF"; -Usage: $0 [h] [Gtsrfdbalspj] +Usage: $0 [h] [Htsrfdbalspjv] -h|--help Print this help -H|--host Search rules based on the Host request header -t|--transaction-id Search rules based on the unique transaction ID @@ -175,7 +179,7 @@ sub usage { -S|--stdin Read rules from stdin instead of an on-disk file -p|--partial-chains Do not prune partial chain matches -j|--json Print rule entries as a JSON blob, rather than nice formatting - -v|--verbose Be verbose about various details such as JSON parse failures + -v|--verbose Be verbose about various details such as JSON parse failures and log data For detailed explanations of various options and example usages, see 'perldoc $0' @@ -339,22 +343,44 @@ sub print_matches { } else { printf "\n%s\n", '=' x 80; - # request - my $transaction = $entry->{transaction}; - my $request = $entry->{request}; + my $transaction = $entry->{transaction}; + my $request = $entry->{request}; + my $response = $entry->{response}; + my $audit_data = $entry->{audit_data}; + my $matched_rules = $entry->{matched_rules}; + + if ($transaction) { + printf "%s\nTransaction ID: %s\nIP: %s\n\n", + parse_modsec_timestamp($transaction->{time}), + $transaction->{transaction_id}, + $transaction->{remote_address}; + } + + printf "%s\n", $request->{request_line} + if $request->{request_line}; + + if ($request->{headers}) { + for my $header (sort keys %{$request->{headers}}) { + printf "%s: %s\n", $header, 
$request->{headers}->{$header}; + } + } + + if ($verbose) { + print join ("\n", @{$request->{body}}) . "\n" + if $request->{body}; - printf "%s\nTransaction ID: %s\nIP: %s\n\n%s\n", - parse_modsec_timestamp($transaction->{time}), - $transaction->{transaction_id}, - $transaction->{remote_address}, - $request->{request_line}; + printf "\n%s %s\n", $response->{protocol}, $response->{status} + if $response->{protocol} && $response->{status}; + + for my $header (sort keys %{$response->{headers}}) { + printf "%s: %s\n", $header, $response->{headers}->{$header}; + } - for my $header (sort keys %{$request->{headers}}) { - printf "%s: %s\n", $header, $request->{headers}->{$header}; + printf "\n%s\n", $response->{body} + if $response->{body}; } - # matched rules - for my $chain (@{$entry->{matched_rules}}) { + for my $chain (@{$matched_rules}) { print "\n"; my @extra_data; my $ctr = 0; @@ -365,11 +391,12 @@ sub print_matches { push @extra_data, $rule->{actionset}->{logdata} if $rule->{actionset}->{logdata}; } - printf "\n-- %s\n", join "\n-- ", @extra_data if @extra_data && $verbose; + printf "\n-- %s\n", join "\n-- ", @extra_data + if @extra_data && $verbose; } - # audit message - printf "\n-- %s\n\n", $entry->{audit_data}->{action}->{message} if $verbose; + printf "\n-- %s\n\n", $audit_data->{action}->{message} + if $audit_data->{action}->{message} && $verbose; printf "%s\n", '=' x 80; } @@ -531,7 +558,7 @@ sub main { $after = build_datetime($after); # figure where we're reading from - $logpath ||= '/var/log/modsec_audit.log'; + $logpath ||= '/var/log/mod_sec/modsec_audit.log'; $fh = get_input({ logpath => $logpath, stdin => $stdin, @@ -551,7 +578,7 @@ sub main { delim => $delim, }); - # walk through our input, getting an arrayref of entries valid based on filters and timeframe + # walk through our input, getting an arrayref of valid entries based on filters and timeframe $parsed_ref = grok_input({ fh => $fh, filters => \%filters, From 
c131dcc93c0ad905b6759a0c48a802ca3af47ea6 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Fri, 29 Jan 2016 13:27:30 -0300 Subject: [PATCH 046/248] Adds information about the pull request #914 on the CHANGES file --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index fc30e44b3f..b0aaf34af4 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD mmm YYYY - X.Y.Z (To be released) ------------------------------------ + * Added support to generate audit logs in JSON format. + [Issue #914, #897, #656 - Robert Paprocki] * Creating AuditLog serial file (or paralel index) respecting the permission configured with SecAuditLogFileMode, before, it was used only to save the transactions while in paralel mode. From eef2c03e644059ef26c007cd9d929df3f13d4fb7 Mon Sep 17 00:00:00 2001 From: Chaim Sanders Date: Mon, 1 Feb 2016 11:16:13 -0500 Subject: [PATCH 047/248] Fixed broken link in readme #1059 --- README.TXT | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.TXT b/README.TXT index 9442e83a2f..03767e345f 100644 --- a/README.TXT +++ b/README.TXT @@ -100,7 +100,7 @@ vulnerability classes for applications such as: * Joomla * For a complete listing of application coverage, please refer to this link (which is updated daily). -https://modsecurity.org/projects/commercial/rules/application_coverage.html +https://modsecurity.org/application_coverage.html 3. Complements and integrates with the OWASP Core Rule Set 4. 
IP Reputation capabilities which provide protection against malicious From a157ac2946b9d66873e8ddacdb76f6dfb3f140ac Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 3 Feb 2016 08:08:21 -0300 Subject: [PATCH 048/248] Fix compilation issue on "pedantic" compilers --- apache2/msc_logging.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index ee7415da8d..66394adab2 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -571,12 +571,13 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) yajl_kv_string(g, "version", log_escape(msr->mp, rule->actionset->version)); } if (rule->actionset->logdata) { + char *logdata = NULL; msc_string *var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); var->value = (char *)rule->actionset->logdata; var->value_len = strlen(rule->actionset->logdata); expand_macros(msr, var, NULL, msr->mp); - char *logdata = apr_pstrdup(msr->mp, log_escape_hex(msr->mp, (unsigned char *)var->value, var->value_len)); + logdata = apr_pstrdup(msr->mp, log_escape_hex(msr->mp, (unsigned char *)var->value, var->value_len)); // if it is > 512 bytes, then truncate at 512 with ellipsis. 
if (strlen(logdata) > 515) { @@ -611,6 +612,7 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) for (k = 0; k < tarr->nelts; k++) { msre_action *action = (msre_action *)telts[k].val; if (strcmp(telts[k].key, "tag") == 0) { + msc_string *var = NULL; if (been_opened == 0) { yajl_string(g, "tags"); yajl_gen_array_open(g); @@ -618,7 +620,7 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) } // expand variables in the tag - msc_string *var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); + var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); var->value = (char *)action->param; var->value_len = strlen(action->param); expand_macros(msr, var, NULL, msr->mp); @@ -676,6 +678,9 @@ void sec_audit_logger_json(modsec_rec *msr) { int arg_min, arg_max, sanitize_matched; yajl_gen g; int been_opened = 0; // helper flag for conditionally opening maps + const unsigned char *final_buf; + size_t len; + /* Return silently if we don't have a request line. This * means we will not be logging request timeouts. @@ -1437,8 +1442,6 @@ void sec_audit_logger_json(modsec_rec *msr) { /* finished building JSON */ yajl_gen_map_close(g); // box it up! 
- const unsigned char *final_buf; - size_t len; yajl_gen_get_buf(g, &final_buf, &len); sec_auditlog_write(msr, final_buf, len); From ad9257c374079b3fc0202178032db2d03bc6b761 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 3 Feb 2016 11:03:00 -0300 Subject: [PATCH 049/248] Version 2.9.1 Increasing version to 2.9.1 and performed small fixes on the CHANGES file --- CHANGES | 24 +++++++++++------------- apache2/msc_release.h | 4 ++-- iis/installer.wxs | 2 +- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/CHANGES b/CHANGES index b0aaf34af4..263d04abe0 100644 --- a/CHANGES +++ b/CHANGES @@ -1,34 +1,32 @@ -DD mmm YYYY - X.Y.Z (To be released) ------------------------------------- +03 Feb 2016 - 2.9.1-RC1 +----------------------- * Added support to generate audit logs in JSON format. [Issue #914, #897, #656 - Robert Paprocki] - * Creating AuditLog serial file (or paralel index) respecting the - permission configured with SecAuditLogFileMode, before, it was used - only to save the transactions while in paralel mode. + * Creating AuditLog serial file (or parallel index) respecting the + permission configured with SecAuditLogFileMode. Previously, it was + used only to save the transactions while in parallel mode. [Issue #852 - @littlecho and ModSecurity team] - * Checking for hashing injection response to report in case of failure. + * Checking for hashing injection response, to report in case of failure. [Issue #1041 - ModSecurity team] * Stop buffering when the request is larger than SecRequestBodyLimit in ProcessPartial mode [Issue #709, #705, #728 - Justin Gerace and ModSecurity team] - * Extanded Lua support to include version 5.3 + * Extended Lua support to include version 5.3 [Issue #837, #762, #814 - Athmane Madjoudj and ModSecurity team] - * mlogc: Allow user to choose between TLS versions (TLSProtocol option + * mlogc: Allows user to choose between TLS versions (TLSProtocol option introduced). 
[Issue #881 - Ishwor Gurung] - * Allow mod_proxy's "nocanon" behavior to be specified in proxy actions - and Perform the intercept_action as well as the disruptive actions. + * Allows mod_proxy's "nocanon" behavior to be specified in proxy actions [Issue #1031, #961, #763 - Mario D. Santana and ModSecurity team] - * Refactoring conditional directives for if wrappers, alternative if - statements and incomplete if conditions. + * Refactoring conditional #if/#defs directives. [Issue #996 - Wesley M and ModSecurity team] * mlogc-batch-load.pl.in: fix searching SecAuditLogStorageDir files with Apache 2.4 [Issue #775 - Elia Pinto] * Understands IIS 10 as compatible on Windows installer. [Issue #931 - Anton Serbulov, Pavel Vasilevich and ModSecurity team] - * Fix apache logging limitation by using correct apache call. + * Fix apache logging limitation by using correct Apache call. [Issue #840 - Christian Folini] * Fix apr_crypto.h check on 32-bit Linux platform [Issue #882, #883 - Kurt Newman] diff --git a/apache2/msc_release.h b/apache2/msc_release.h index a34578b663..c85b3dbcfd 100644 --- a/apache2/msc_release.h +++ b/apache2/msc_release.h @@ -38,9 +38,9 @@ #define MODSEC_VERSION_MAJOR "2" #define MODSEC_VERSION_MINOR "9" -#define MODSEC_VERSION_MAINT "0" +#define MODSEC_VERSION_MAINT "1" #define MODSEC_VERSION_TYPE "" -#define MODSEC_VERSION_RELEASE "" +#define MODSEC_VERSION_RELEASE "-RC1" #define MODSEC_VERSION_SUFFIX MODSEC_VERSION_TYPE MODSEC_VERSION_RELEASE diff --git a/iis/installer.wxs b/iis/installer.wxs index 49d2e5e42a..b0e2644583 100644 --- a/iis/installer.wxs +++ b/iis/installer.wxs @@ -7,7 +7,7 @@ lightArgs: --> - + From 88bffb1e3eda8cc5e24965c9e06dec15f91be11a Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 9 Mar 2016 10:07:44 -0300 Subject: [PATCH 050/248] Version 2.9.1 (final) Increasing version to 2.9.1 (final) --- CHANGES | 5 +++++ apache2/msc_release.h | 2 +- iis/installer.wxs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff 
--git a/CHANGES b/CHANGES index 263d04abe0..6ca4b4feb6 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,8 @@ +09 Mar 2016 - 2.9.1 +------------------- + + * No changes. + 03 Feb 2016 - 2.9.1-RC1 ----------------------- diff --git a/apache2/msc_release.h b/apache2/msc_release.h index c85b3dbcfd..f2fe898d5d 100644 --- a/apache2/msc_release.h +++ b/apache2/msc_release.h @@ -40,7 +40,7 @@ #define MODSEC_VERSION_MINOR "9" #define MODSEC_VERSION_MAINT "1" #define MODSEC_VERSION_TYPE "" -#define MODSEC_VERSION_RELEASE "-RC1" +#define MODSEC_VERSION_RELEASE "" #define MODSEC_VERSION_SUFFIX MODSEC_VERSION_TYPE MODSEC_VERSION_RELEASE diff --git a/iis/installer.wxs b/iis/installer.wxs index b0e2644583..015c9351f8 100644 --- a/iis/installer.wxs +++ b/iis/installer.wxs @@ -7,7 +7,7 @@ lightArgs: --> - + From f9c253952c564ffefd6c10b8327ef836832facea Mon Sep 17 00:00:00 2001 From: root Date: Fri, 29 Jan 2016 08:43:28 +0000 Subject: [PATCH 051/248] This is fix for reborn of https://github.com/SpiderLabs/ModSecurity/issues/334 This bug has been reborn, because Apache (at least in RedHat/CentOS) since version 2.2.15-47 returns in same case APR_INCOMPLETE (not APR_EOF). Based on same patch I have added handler for APR_INCOMPLETE. --- apache2/apache2_io.c | 3 +++ apache2/mod_security2.c | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/apache2/apache2_io.c b/apache2/apache2_io.c index 0d59613e4d..c14dd41318 100644 --- a/apache2/apache2_io.c +++ b/apache2/apache2_io.c @@ -208,6 +208,9 @@ apr_status_t read_request_body(modsec_rec *msr, char **error_msg) { * too large and APR_EGENERAL when the client disconnects. 
*/ switch(rc) { + case APR_INCOMPLETE : + *error_msg = apr_psprintf(msr->mp, "Error reading request body: %s", get_apr_error(msr->mp, rc)); + return -7; case APR_EOF : *error_msg = apr_psprintf(msr->mp, "Error reading request body: %s", get_apr_error(msr->mp, rc)); return -6; diff --git a/apache2/mod_security2.c b/apache2/mod_security2.c index 5404fd8744..14e9f44234 100644 --- a/apache2/mod_security2.c +++ b/apache2/mod_security2.c @@ -1030,6 +1030,13 @@ static int hook_request_late(request_rec *r) { r->connection->keepalive = AP_CONN_CLOSE; return HTTP_BAD_REQUEST; break; + case -7 : /* Partial recieved */ + if (my_error_msg != NULL) { + msr_log(msr, 4, "%s", my_error_msg); + } + r->connection->keepalive = AP_CONN_CLOSE; + return HTTP_BAD_REQUEST; + break; default : /* allow through */ break; From 808ea48263f2123ea7b8d56678b16ea42855cfb3 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 16 Mar 2016 10:37:01 -0300 Subject: [PATCH 052/248] Adds information about the pull request #1060 on the CHANGES file --- CHANGES | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES b/CHANGES index 6ca4b4feb6..f4cb22fe97 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,9 @@ +DD MMM YYYY - 2.9.2 - To be released +------------------------------------ + + * Treat APR_INCOMPLETE as APR_EOF while receiving the request body. + [Issue #1060, #334 - Alexey Sintsov] + 09 Mar 2016 - 2.9.1 ------------------- From c729b6d0ab5c377b983fe58f10940ba2a4069b0a Mon Sep 17 00:00:00 2001 From: Thomas Deutschmann Date: Sat, 11 Jun 2016 16:18:17 +0200 Subject: [PATCH 053/248] configure: Fix detection whether libcurl is linked against gnutls The find_curl macro is also checking whether libcurl is linked against gnutls. However the check depends on "CURL_LIBS" which wasn't defined by the macro. This commit will define "CURL_LIBS" so that the check works as expected. 
--- build/find_curl.m4 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/build/find_curl.m4 b/build/find_curl.m4 index 6b23ad696e..3310e40494 100644 --- a/build/find_curl.m4 +++ b/build/find_curl.m4 @@ -2,6 +2,7 @@ dnl Check for CURL Libraries dnl CHECK_CURL(ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND]) dnl Sets: dnl CURL_CFLAGS +dnl CURL_LDADD dnl CURL_LIBS CURL_CONFIG="" @@ -57,7 +58,8 @@ if test -n "${curl_path}"; then if test "$verbose_output" -eq 1; then AC_MSG_NOTICE(curl VERSION: $CURL_VERSION); fi CURL_CFLAGS="`${CURL_CONFIG} --cflags`" if test "$verbose_output" -eq 1; then AC_MSG_NOTICE(curl CFLAGS: $CURL_CFLAGS); fi - CURL_LDADD="`${CURL_CONFIG} --libs`" + CURL_LIBS="`${CURL_CONFIG} --libs`" + CURL_LDADD="${CURL_LIBS}" if test "$verbose_output" -eq 1; then AC_MSG_NOTICE(curl LDADD: $CURL_LIBS); fi dnl # Check version is ok From 692712cc953bb567c4b7facb5a367b3cd0e3495a Mon Sep 17 00:00:00 2001 From: Thomas Deutschmann Date: Sat, 11 Jun 2016 16:48:58 +0200 Subject: [PATCH 054/248] configure: Move verbose_output declaration up to the beginning Macros like "find_curl" are using "verbose_output" variable but because some of them are called before we define the variable we are seeing errors like ./configure: line 13855: test: : integer expression expected This commit will fix the problem by moving the "verbose_output" declaration up to the beginning so that the variable is available for every macro. 
--- configure.ac | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/configure.ac b/configure.ac index 7517885893..0f32b01bf2 100644 --- a/configure.ac +++ b/configure.ac @@ -70,6 +70,22 @@ AC_SUBST(MSC_REGRESSION_DOCROOT_DIR) ### Configure Options +# Verbose output +AC_ARG_ENABLE(verbose-output, + AS_HELP_STRING([--enable-verbose-output], + [Enable more verbose configure output.]), +[ + if test "$enableval" != "no"; then + verbose_output=1 + else + verbose_output=0 + fi +], +[ + verbose_output=0 +]) + + #OS type AC_CANONICAL_HOST @@ -410,20 +426,6 @@ AC_ARG_ENABLE(errors, report_errors=1 ]) -# Verbose output -AC_ARG_ENABLE(verbose-output, - AS_HELP_STRING([--enable-verbose-output], - [Enable more verbose configure output.]), -[ - if test "$enableval" != "no"; then - verbose_output=1 - else - verbose_output=0 - fi -], -[ - verbose_output=0 -]) # Strict Compile AC_ARG_ENABLE(strict-compile, From a2bb610d7c6909cf8c8de259e206c00342eb6c7e Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 14 Jun 2016 15:19:00 -0300 Subject: [PATCH 055/248] Adds information about #1158 --- CHANGES | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES b/CHANGES index f4cb22fe97..c05d36ff32 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * configure: Fix detection whether libcurl is linked against gnutls and, + move verbose_output declaration up to the beginning. + [Issue #1158 - Thomas Deutschmann (@Whissi)] * Treat APR_INCOMPLETE as APR_EOF while receiving the request body. [Issue #1060, #334 - Alexey Sintsov] From f2ef2017f148fc78cb50c621b2bde9b1aff7a64d Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 29 Jun 2016 11:38:22 -0700 Subject: [PATCH 056/248] Fix file upload JSON audit log entry Each uploaded file is a separate yajl array, but we forgot to open the a map for the proper k/v pairs. This fixes issue #1173. 
--- apache2/msc_logging.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 66394adab2..2aff589b3f 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1364,10 +1364,12 @@ void sec_audit_logger_json(modsec_rec *msr) { for(cfiles = 0; cfiles < msr->mpd->parts->nelts; cfiles++) { if (parts[cfiles]->type == MULTIPART_FILE) { if(parts[cfiles]->filename != NULL) { + yajl_gen_map_open(g); yajl_kv_int(g, "file_size", parts[cfiles]->tmp_file_size); yajl_kv_string(g, "file_name", log_escape(msr->mp, parts[cfiles]->filename)); yajl_kv_string(g, "content_type", parts[cfiles]->content_type ? parts[cfiles]->content_type : ""); total_size += parts[cfiles]->tmp_file_size; + yajl_gen_map_close(g); } } } From 2538d90e5f8901fb9348311308bc7910a112c523 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 11 Jul 2016 12:17:31 -0300 Subject: [PATCH 057/248] Adds information about pull request #1181 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index c05d36ff32..ffe9cd4772 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Fix file upload JSON audit log entry + [Issue #1181 and #1173 - Robert Paprocki and Christian Folini] * configure: Fix detection whether libcurl is linked against gnutls and, move verbose_output declaration up to the beginning. 
[Issue #1158 - Thomas Deutschmann (@Whissi)] From 947cef7c8c4cbb57462d740bcf5c980d8d4460a6 Mon Sep 17 00:00:00 2001 From: Chaim sanders Date: Wed, 22 Jun 2016 19:23:13 -0400 Subject: [PATCH 058/248] Adapted patch from 977 to fix status failing to report in Nginx auditlogs --- apache2/msc_logging.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 2aff589b3f..608f31c09b 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1897,7 +1897,7 @@ void sec_audit_logger_native(modsec_rec *msr) { /* There are no response headers (or the status line) in HTTP 0.9 */ if (msr->response_headers_sent) { - if (msr->status_line != NULL) { + if (msr->status_line != NULL && msr->status_line[0] != '\0') { text = apr_psprintf(msr->mp, "%s %s\n", msr->response_protocol, msr->status_line); } else { From 923c3c67938da4de4f7f147816b6d2d6ffff5e6f Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 11 Jul 2016 13:36:16 -0300 Subject: [PATCH 059/248] Adds information about pull request #1171 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index ffe9cd4772..67fdbfbdf8 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Fix status failing to report in Nginx auditlogs + [Issue #977, #1171 - @charlymps and Chaim Sanders] * Fix file upload JSON audit log entry [Issue #1181 and #1173 - Robert Paprocki and Christian Folini] * configure: Fix detection whether libcurl is linked against gnutls and, From 21a63cb83e39c660015c1aac5f7f283faff6e07a Mon Sep 17 00:00:00 2001 From: Ephraim Vider Date: Sun, 28 Aug 2016 17:58:59 +0300 Subject: [PATCH 060/248] json parser handle cleanup --- apache2/msc_json.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/apache2/msc_json.c b/apache2/msc_json.c index 4bec0f14df..0f9a4645e2 100644 --- a/apache2/msc_json.c +++ b/apache2/msc_json.c @@ -306,10 
+306,14 @@ int json_complete(modsec_rec *msr, char **error_msg) { } /** - * Frees the resources used for XML parsing. + * Frees the resources used for JSON parsing. */ apr_status_t json_cleanup(modsec_rec *msr) { msr_log(msr, 4, "JSON: Cleaning up JSON results"); + if (msr->json->handle != NULL) { + yajl_free(msr->json->handle); + msr->json->handle = NULL; + } return 1; } From 5f4a098f08150669ba15a5249f4d49f41cf1856d Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 21 Sep 2016 00:05:13 -0300 Subject: [PATCH 061/248] Adds information about pull request #1204 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 67fdbfbdf8..52cf5d9f3e 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Better handle the json parser cleanup + [Issue #1204 - Ephraim Vider] * Fix status failing to report in Nginx auditlogs [Issue #977, #1171 - @charlymps and Chaim Sanders] * Fix file upload JSON audit log entry From 2b4ece14c67d84220fab56cf37397884707fec36 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Tue, 19 Jul 2016 09:08:58 -0700 Subject: [PATCH 062/248] Remove logdata and msg fields from JSON audit log rule elements Writing macro-expanded strings to JSON elements during the post-logging phase can be misleading, because it's possible that variable contents (such as MATCHED_VAR) could have changed after the rule match, altering their expected contents. Writing macro-epanded audit data really only makes sense when the macros are expanded immediately following the rule match. See issue #1174 for more details. 
--- apache2/msc_logging.c | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 608f31c09b..0fe74ab7ca 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -559,36 +559,9 @@ static void write_rule_json(modsec_rec *msr, const msre_rule *rule, yajl_gen g) if (rule->actionset->rev) { yajl_kv_string(g, "rev", log_escape(msr->mp, rule->actionset->rev)); } - if (rule->actionset->msg) { - msc_string *var = (msc_string *)apr_palloc(msr->mp, sizeof(msc_string)); - var->value = (char *)rule->actionset->msg; - var->value_len = strlen(rule->actionset->msg); - expand_macros(msr, var, NULL, msr->mp); - - yajl_kv_string(g, "msg", log_escape_ex(msr->mp, var->value, var->value_len)); - } if (rule->actionset->version) { yajl_kv_string(g, "version", log_escape(msr->mp, rule->actionset->version)); } - if (rule->actionset->logdata) { - char *logdata = NULL; - msc_string *var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string)); - var->value = (char *)rule->actionset->logdata; - var->value_len = strlen(rule->actionset->logdata); - expand_macros(msr, var, NULL, msr->mp); - - logdata = apr_pstrdup(msr->mp, log_escape_hex(msr->mp, (unsigned char *)var->value, var->value_len)); - - // if it is > 512 bytes, then truncate at 512 with ellipsis. 
- if (strlen(logdata) > 515) { - logdata[512] = '.'; - logdata[513] = '.'; - logdata[514] = '.'; - logdata[515] = '\0'; - } - - yajl_kv_string(g, "logdata", logdata); - } if (rule->actionset->severity != NOT_SET) { yajl_kv_int(g, "severity", rule->actionset->severity); } From b1ee051cee813856f1f7cfcb7a747dcab3008864 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 4 Oct 2016 09:41:16 -0300 Subject: [PATCH 063/248] Adds information about pull request #1190 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 52cf5d9f3e..3dbd737602 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Remove logdata and msg fields from JSON audit log rule. + [Issue #1190 and #1174 - Robert Paprocki] * Better handle the json parser cleanup [Issue #1204 - Ephraim Vider] * Fix status failing to report in Nginx auditlogs From b83c1109adb709b7e93089805aa7e5647c39c0ee Mon Sep 17 00:00:00 2001 From: culyerr Date: Tue, 27 Sep 2016 17:09:18 +0100 Subject: [PATCH 064/248] Fixed IPv4+Port address parsing --- iis/mymodule.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/iis/mymodule.cpp b/iis/mymodule.cpp index 464968cba9..607fdf0a6d 100644 --- a/iis/mymodule.cpp +++ b/iis/mymodule.cpp @@ -88,6 +88,10 @@ class REQUEST_STORED_CONTEXT : public IHttpStoredContext char *GetIpAddr(apr_pool_t *pool, PSOCKADDR pAddr) { + const char *format = "%15[0-9.]:%5[0-9]"; + char ip[16] = { 0 }; // ip4 addresses have max len 15 + char port[6] = { 0 }; // port numbers are 16bit, ie 5 digits max + DWORD len = 50; char *buf = (char *)apr_palloc(pool, len); @@ -98,6 +102,14 @@ char *GetIpAddr(apr_pool_t *pool, PSOCKADDR pAddr) WSAAddressToString(pAddr, sizeof(SOCKADDR), NULL, buf, &len); + // test for IPV4 with port on the end + if (sscanf(buf, format, ip, port) == 2) { + // IPV4 but with port - remove the port + char* input = ":"; + char* ipv4 = strtok(buf, input); + return ipv4; 
+ } + return buf; } From 137331748c60751a27674692630989b5eaafae94 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 4 Oct 2016 13:26:43 -0300 Subject: [PATCH 065/248] Adds information about pull request #1220 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 3dbd737602..f906921edd 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Remove port from IPV4 address when running under IIS. + [Issue #1220, #1109 and #734 - Robert Culyer] * Remove logdata and msg fields from JSON audit log rule. [Issue #1190 and #1174 - Robert Paprocki] * Better handle the json parser cleanup From e7f029b55aa11a4ac4536adf6003dc127bee4582 Mon Sep 17 00:00:00 2001 From: arminabf Date: Mon, 26 Sep 2016 12:03:50 +0200 Subject: [PATCH 066/248] fix error message both info->format and fmt (for versions prio 2.4) contain the error message format but not the actual formatted error message --- apache2/mod_security2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apache2/mod_security2.c b/apache2/mod_security2.c index 14e9f44234..ffb450b4c3 100644 --- a/apache2/mod_security2.c +++ b/apache2/mod_security2.c @@ -1144,13 +1144,13 @@ static void hook_error_log(const char *file, int line, int level, apr_status_t s em->line = info->line; em->level = info->level; em->status = info->status; - if (info->format != NULL) em->message = apr_pstrdup(msr->mp, info->format); + em->message = apr_pstrdup(msr->mp, errstr); #else if (file != NULL) em->file = apr_pstrdup(msr->mp, file); em->line = line; em->level = level; em->status = status; - if (fmt != NULL) em->message = apr_pstrdup(msr->mp, fmt); + em->message = apr_pstrdup(msr->mp, errstr); #endif /* Remove \n from the end of the message */ if (em->message != NULL) { From fb3bbf37e806de10641f3de37b25e410be00067a Mon Sep 17 00:00:00 2001 From: arminabf Date: Mon, 26 Sep 2016 13:58:31 +0200 Subject: [PATCH 067/248] revert 
error message assignment for older versions as errstr is only available since version > 2.2 --- apache2/mod_security2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/mod_security2.c b/apache2/mod_security2.c index ffb450b4c3..93c2fbe492 100644 --- a/apache2/mod_security2.c +++ b/apache2/mod_security2.c @@ -1150,7 +1150,7 @@ static void hook_error_log(const char *file, int line, int level, apr_status_t s em->line = line; em->level = level; em->status = status; - em->message = apr_pstrdup(msr->mp, errstr); + if (fmt != NULL) em->message = apr_pstrdup(msr->mp, fmt); #endif /* Remove \n from the end of the message */ if (em->message != NULL) { From 8559dd3b8bfab22a06d1372a2a20fcaa207c96be Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 6 Oct 2016 13:30:25 -0300 Subject: [PATCH 068/248] Adds information about pull request #1216 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index f906921edd..1ee330af1d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Fix error message inside audit logs + [Issue #1216 and #1073 - Armin Abfalterer] * Remove port from IPV4 address when running under IIS. [Issue #1220, #1109 and #734 - Robert Culyer] * Remove logdata and msg fields from JSON audit log rule. From 709042a4720697bf0d2085ef7276a6aa190f2142 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Tue, 4 Oct 2016 15:45:25 -0700 Subject: [PATCH 069/248] Don't unnecessarily rename request body parts in cleanup When tmp_dir and upload_dir are identical, there's no reason to rename multipart and request body parts, as this is a non-op. Let's save the cycles and syscall. 
--- apache2/msc_multipart.c | 5 +++++ apache2/msc_reqbody.c | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/apache2/msc_multipart.c b/apache2/msc_multipart.c index 88eff9b7a1..9bf327d2bc 100644 --- a/apache2/msc_multipart.c +++ b/apache2/msc_multipart.c @@ -1327,6 +1327,11 @@ apr_status_t multipart_cleanup(modsec_rec *msr) { } else { /* Move file to the upload dir. */ if (parts[i]->tmp_file_name != NULL) { + if (strcmp(msr->txcfg->upload_dir, msr->txcfg->tmp_dir) == 0) { + msr_log(msr, 4, "Not moving part to identical location"); + continue; + } + const char *new_filename = NULL; const char *new_basename = NULL; diff --git a/apache2/msc_reqbody.c b/apache2/msc_reqbody.c index 7d150eedff..a8411fa04d 100644 --- a/apache2/msc_reqbody.c +++ b/apache2/msc_reqbody.c @@ -884,6 +884,11 @@ apr_status_t modsecurity_request_body_clear(modsec_rec *msr, char **error_msg) { if (msr->msc_reqbody_filename != NULL) { if (keep_body) { + if (strcmp(msr->txcfg->upload_dir, msr->txcfg->tmp_dir) == 0) { + msr_log(msr, 4, "Not moving file to identical location."); + goto nullify; + } + /* Move request body (which is a file) to the storage area. 
*/ const char *put_filename = NULL; const char *put_basename = NULL; @@ -933,6 +938,8 @@ apr_status_t modsecurity_request_body_clear(modsec_rec *msr, char **error_msg) { msr->msc_reqbody_filename); } +nullify: + msr->msc_reqbody_filename = NULL; } } From c95d93483b76e1ed1eb5a163d1cc2e72019cb866 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 10 Oct 2016 12:34:04 -0300 Subject: [PATCH 070/248] Adds information about pull request #1223 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 1ee330af1d..4cb1384a4d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * JSON Log: Don't unnecessarily rename request body parts in cleanup + [Issue #1223 - Robert Paprocki] * Fix error message inside audit logs [Issue #1216 and #1073 - Armin Abfalterer] * Remove port from IPV4 address when running under IIS. From a34f9eb785dc1b0069b38f785af00c142ac6d793 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 19 Oct 2016 14:51:59 -0700 Subject: [PATCH 071/248] Append a newline to concurrent JSON audit logs --- apache2/msc_logging.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 0fe74ab7ca..e35da5520f 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1423,11 +1423,12 @@ void sec_audit_logger_json(modsec_rec *msr) { yajl_gen_clear(g); yajl_gen_free(g); + sec_auditlog_write(msr, "\n", 1); + /* Return here if we were writing to a serial log * as it does not need an index file. */ if (msr->txcfg->auditlog_type != AUDITLOG_CONCURRENT) { - sec_auditlog_write(msr, "\n", 1); /* Unlock the mutex we used to serialise access to the audit log file. 
*/ rc = apr_global_mutex_unlock(msr->modsecurity->auditlog_lock); From bb577950bf983811ff1892e87d815a1909c0b96b Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 20 Oct 2016 09:44:25 -0300 Subject: [PATCH 072/248] Adds information about pull request #1233 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 4cb1384a4d..403a30600f 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * JSON Log: Append a newline to concurrent JSON audit logs + [Issue #1233 - Robert Paprocki] * JSON Log: Don't unnecessarily rename request body parts in cleanup [Issue #1223 - Robert Paprocki] * Fix error message inside audit logs From 7ff0e7e7b25530d524dd8beb66826e60ab4403c3 Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Tue, 17 May 2016 16:50:16 +0200 Subject: [PATCH 073/248] Added ALLOW_ID_NOT_UNIQUE compile flag to allow duplicate rule ids and no id --- apache2/apache2_config.c | 4 +++- configure.ac | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/apache2/apache2_config.c b/apache2/apache2_config.c index 3ab618a9e2..ce97950f54 100644 --- a/apache2/apache2_config.c +++ b/apache2/apache2_config.c @@ -799,7 +799,8 @@ static const char *add_rule(cmd_parms *cmd, directory_config *dcfg, int type, return my_error_msg; } - /* Rules must have uniq ID */ +#ifndef ALLOW_ID_NOT_UNIQUE + /* Rules must have uniq ID */ type_rule = (dcfg->tmp_chain_starter == NULL); #if defined(WITH_LUA) type_rule = (type != RULE_TYPE_LUA && type_rule); @@ -831,6 +832,7 @@ static const char *add_rule(cmd_parms *cmd, directory_config *dcfg, int type, // return "ModSecurity: Found another rule with the same id"; } } +#endif /* Create default actionset if one does not already exist. 
*/ if (dcfg->tmp_default_actionset == NULL) { diff --git a/configure.ac b/configure.ac index 0f32b01bf2..fd576e6e2f 100644 --- a/configure.ac +++ b/configure.ac @@ -411,6 +411,22 @@ AC_ARG_ENABLE(request-early, request_early='-DREQUEST_EARLY' ]) +# Enable duplicate rules id +AC_ARG_ENABLE(rule-id-validation, + AS_HELP_STRING([--enable-rule-id-validation], + [Forbid duplicate rule ids and missing ones. This is the default]), +[ + if test "$enableval" != "no"; then + unique_id= + else + unique_id="-DALLOW_ID_NOT_UNIQUE" + MODSEC_EXTRA_CFLAGS="$MODSEC_EXTRA_CFLAGS $unique_id" + fi +], +[ + unique_id='' +]) + # Ignore configure errors AC_ARG_ENABLE(errors, AS_HELP_STRING([--disable-errors], From 3fce12a96cddee4d702d2082f53b58ee84115fac Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 21 Nov 2016 10:58:43 -0300 Subject: [PATCH 074/248] Fix on the patch proposal #1150 That is a fix on the top of #1150 without this fix the patch won't work as expected. --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index fd576e6e2f..4eefad818c 100644 --- a/configure.ac +++ b/configure.ac @@ -677,7 +677,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type" +MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" From 3e6f6e63bcaf086af298dfc249aced32709d41ac Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 21 Nov 2016 11:02:13 -0300 Subject: [PATCH 075/248] Adds information about pull request #1150 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 403a30600f..34f23b638f 100644 --- 
a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * {dis|en}able-rule-id-validation: Option to disable rule id validation + [Issue #1150 - Marc Stern and ModSecurity team] * JSON Log: Append a newline to concurrent JSON audit logs [Issue #1233 - Robert Paprocki] * JSON Log: Don't unnecessarily rename request body parts in cleanup From 792a351de62339f6943d2e7a43d9f59d3e77d9ae Mon Sep 17 00:00:00 2001 From: Master Yoda Date: Fri, 11 Nov 2016 13:09:44 +0100 Subject: [PATCH 076/248] As of 17 May 2016, the country name "Czechia" replaces this MemberState's former short name of Czech Republic (code 203) --- apache2/msc_geo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apache2/msc_geo.c b/apache2/msc_geo.c index 49cf292c7d..134f40fa21 100644 --- a/apache2/msc_geo.c +++ b/apache2/msc_geo.c @@ -82,7 +82,7 @@ static const char *const geo_country_name[GEO_COUNTRY_LAST + 1] = { "Bangladesh","Belgium","Burkina Faso","Bulgaria","Bahrain","Burundi","Benin","Bermuda","Brunei Darussalam","Bolivia", "Brazil","Bahamas","Bhutan","Bouvet Island","Botswana","Belarus","Belize","Canada","Cocos (Keeling) Islands","Congo, The Democratic Republic of the", "Central African Republic","Congo","Switzerland","Cote D'Ivoire","Cook Islands","Chile","Cameroon","China","Colombia","Costa Rica", - "Cuba","Cape Verde","Christmas Island","Cyprus","Czech Republic","Germany","Djibouti","Denmark","Dominica","Dominican Republic", + "Cuba","Cape Verde","Christmas Island","Cyprus","Czechia","Germany","Djibouti","Denmark","Dominica","Dominican Republic", "Algeria","Ecuador","Estonia","Egypt","Western Sahara","Eritrea","Spain","Ethiopia","Finland","Fiji", "Falkland Islands (Malvinas)","Micronesia, Federated States of","Faroe Islands","France","France, Metropolitan","Gabon","United Kingdom","Grenada","Georgia","French Guiana", "Ghana","Gibraltar","Greenland","Gambia","Guinea","Guadeloupe","Equatorial Guinea","Greece","South 
Georgia and the South Sandwich Islands","Guatemala", From b6053df941f872b5c6ee8754837a8d5b21024769 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 1 Dec 2016 15:14:39 -0300 Subject: [PATCH 077/248] Adds information about pull request #1258 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 34f23b638f..e39d5eefe9 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Using Czechia instea of Czech Republic + [Issue #1258 - Michael Kjeldsen] * {dis|en}able-rule-id-validation: Option to disable rule id validation [Issue #1150 - Marc Stern and ModSecurity team] * JSON Log: Append a newline to concurrent JSON audit logs From 1bb2ffcd6b01bd492a0db48c55295421075d355f Mon Sep 17 00:00:00 2001 From: Andrei Belov Date: Mon, 3 Apr 2017 12:52:01 +0300 Subject: [PATCH 078/248] Fix building with nginx >= 1.11.11 Closes SpiderLabs/ModSecurity#1359 See also: http://hg.nginx.org/nginx/rev/e662cbf1b932 --- nginx/modsecurity/ngx_http_modsecurity.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nginx/modsecurity/ngx_http_modsecurity.c b/nginx/modsecurity/ngx_http_modsecurity.c index 7c1395315b..367b2b8de3 100644 --- a/nginx/modsecurity/ngx_http_modsecurity.c +++ b/nginx/modsecurity/ngx_http_modsecurity.c @@ -528,9 +528,15 @@ ngx_http_modsecurity_save_request_body(ngx_http_request_t *r) hc = r->http_connection; +#if defined(nginx_version) && nginx_version >= 1011011 + if (hc->free && size == cscf->large_client_header_buffers.size) { + + buf = hc->free->buf; +#else if (hc->nfree && size == cscf->large_client_header_buffers.size) { buf = hc->free[--hc->nfree]; +#endif ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ModSecurity: use http free large header buffer: %p %uz", From eb798d8c553d9fd48456cbe443462ddf38bbec44 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Mon, 3 Apr 2017 16:23:33 -0300 Subject: [PATCH 079/248] Adds information about pull 
request #1373 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index e39d5eefe9..0e55eceebb 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Fix building with nginx >= 1.11.11 + [Issue #1373, #1359 - Andrei Belov and Thomas Deutschmann] * Using Czechia instea of Czech Republic [Issue #1258 - Michael Kjeldsen] * {dis|en}able-rule-id-validation: Option to disable rule id validation From 5e4e2af7a6f07854fee6ed36ef4a381d4e03960e Mon Sep 17 00:00:00 2001 From: Chaim Sanders Date: Mon, 3 Apr 2017 19:52:14 -0400 Subject: [PATCH 080/248] add support for soap+xml As was talked about by @emphazer in https://github.com/SpiderLabs/owasp-modsecurity-crs/pull/721, RFC 3902 adds support for the application/soap+xml header used by SOAP 1.2. --- modsecurity.conf-recommended | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modsecurity.conf-recommended b/modsecurity.conf-recommended index 42a6f6c889..728afc1afd 100644 --- a/modsecurity.conf-recommended +++ b/modsecurity.conf-recommended @@ -19,7 +19,7 @@ SecRequestBodyAccess On # Enable XML request body parser. # Initiate XML Processor in case of xml content-type # -SecRule REQUEST_HEADERS:Content-Type "(?:text|application)/xml" \ +SecRule REQUEST_HEADERS:Content-Type "(?:application(?:/soap\+|/)|text/)xml" \ "id:'200000',phase:1,t:none,t:lowercase,pass,nolog,ctl:requestBodyProcessor=XML" # Enable JSON request body parser. 
From 6cce8a2764587e5d363504f960ff87ad2a32bc2b Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 6 Apr 2017 09:37:52 -0300 Subject: [PATCH 081/248] Adds information about pull request #1374 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index 0e55eceebb..c36a540cda 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Changes the configuration to recognize soap+xml as XML + [Issue #1374 - @emphazer and Chaim Sanders] * Fix building with nginx >= 1.11.11 [Issue #1373, #1359 - Andrei Belov and Thomas Deutschmann] * Using Czechia instea of Czech Republic From fd49ca71387b0489168012658417f73652c3f4f2 Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Wed, 22 Feb 2017 13:54:49 -0800 Subject: [PATCH 082/248] Don't leak an fd on fuzzy hash initialization Since we're re-opening this file with every invocation, let's close our sanity check fd. --- apache2/re_operators.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/apache2/re_operators.c b/apache2/re_operators.c index 4f748d77da..597891f561 100644 --- a/apache2/re_operators.c +++ b/apache2/re_operators.c @@ -3828,6 +3828,7 @@ static int msre_op_fuzzy_hash_init(msre_rule *rule, char **error_msg) { #ifdef WITH_SSDEEP struct fuzzy_hash_param_data *param_data; + FILE *fp; char *file; int param_len,threshold; @@ -3876,14 +3877,15 @@ static int msre_op_fuzzy_hash_init(msre_rule *rule, char **error_msg) } file = resolve_relative_path(rule->ruleset->mp, rule->filename, file); - - if (!fopen(file, "r")) + + fp = fopen(file, "r"); + if (!fp) { *error_msg = apr_psprintf(rule->ruleset->mp, "Not able to open file:" \ " %s.", file); return -1; } - + fclose(fp); param_data->file = file; param_data->threshold = threshold; From 96a1f55e16da7c6b9f09b74a2e5d5227d8aa692d Mon Sep 17 00:00:00 2001 From: Robert Paprocki Date: Thu, 2 Mar 2017 13:52:45 -0800 Subject: [PATCH 083/248] Read fuzzy hash 
databases on init Instead of reading the fuzzy db on every invocation, read and store the db contents during initialization and store the contents in memory. The only significant behavior change here is that a change in db contents now (obviously) requires a daemon restart, as no API is provided to flush the list of ssdeep chunks. --- apache2/re.h | 6 +++++ apache2/re_operators.c | 53 +++++++++++++++++++++++++++--------------- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/apache2/re.h b/apache2/re.h index 836e2af2b8..c0c5433965 100644 --- a/apache2/re.h +++ b/apache2/re.h @@ -409,8 +409,14 @@ struct msre_cache_rec { apr_size_t val_len; }; +struct fuzzy_hash_chunk { + const char *data; + struct fuzzy_hash_chunk *next; +}; + struct fuzzy_hash_param_data { const char *file; + struct fuzzy_hash_chunk *head; int threshold; }; diff --git a/apache2/re_operators.c b/apache2/re_operators.c index 597891f561..e54a540700 100644 --- a/apache2/re_operators.c +++ b/apache2/re_operators.c @@ -1279,7 +1279,7 @@ static int msre_op_pmFromFile_param_init(msre_rule *rule, char **error_msg) { strncmp(fn, "http://", strlen("http://")) == 0) { *error_msg = apr_psprintf(rule->ruleset->mp, "HTTPS address or " \ - "file path are expected for operator pmFromFile \"%s\"", fn); + "file path are expected for operator pmFromFile \"%s\"", fn); return 0; } else if (strlen(fn) > strlen("https://") && @@ -1316,7 +1316,7 @@ static int msre_op_pmFromFile_param_init(msre_rule *rule, char **error_msg) { msc_remote_clean_chunk(&chunk); #else *error_msg = apr_psprintf(rule->ruleset->mp, "ModSecurity was not " \ - "compiled with Curl support, it cannot load: \"%s\"", fn); + "compiled with Curl support, it cannot load: \"%s\"", fn); return 0; #endif } @@ -3828,9 +3828,11 @@ static int msre_op_fuzzy_hash_init(msre_rule *rule, char **error_msg) { #ifdef WITH_SSDEEP struct fuzzy_hash_param_data *param_data; + struct fuzzy_hash_chunk *chunk, *t; FILE *fp; char *file; int param_len,threshold; + 
char line[1024]; char *data = NULL; char *threshold_str = NULL; @@ -3838,6 +3840,8 @@ static int msre_op_fuzzy_hash_init(msre_rule *rule, char **error_msg) param_data = apr_palloc(rule->ruleset->mp, sizeof(struct fuzzy_hash_param_data)); + param_data->head = NULL; + data = apr_pstrdup(rule->ruleset->mp, rule->op_param); threshold_str = data; #endif @@ -3885,6 +3889,28 @@ static int msre_op_fuzzy_hash_init(msre_rule *rule, char **error_msg) " %s.", file); return -1; } + + while (read_line(line, sizeof(line), fp)) + { + chunk = apr_palloc(rule->ruleset->mp, + sizeof(struct fuzzy_hash_chunk)); + + chunk->data = apr_pstrdup(rule->ruleset->mp, line); + chunk->next = NULL; + + if (param_data->head == NULL) { + param_data->head = chunk; + } else { + t = param_data->head; + + while (t->next) { + t = t->next; + } + + t->next = chunk; + } + } + fclose(fp); param_data->file = file; @@ -3911,8 +3937,7 @@ static int msre_op_fuzzy_hash_execute(modsec_rec *msr, msre_rule *rule, #ifdef WITH_SSDEEP char result[FUZZY_MAX_RESULT]; struct fuzzy_hash_param_data *param = rule->op_param_data; - FILE *fp; - char line[1024]; + struct fuzzy_hash_chunk *chunk = param->head; #endif if (error_msg == NULL) @@ -3931,29 +3956,19 @@ static int msre_op_fuzzy_hash_execute(modsec_rec *msr, msre_rule *rule, return -1; } - fp = fopen(param->file, "r"); - if (!fp) - { - *error_msg = apr_psprintf(rule->ruleset->mp, "Not able to open " \ - "fuzzy hash file: %s", param->file); - - return 1; - } - - while (read_line(line, sizeof(line), fp)) + while (chunk != NULL) { - int i = fuzzy_compare(line, result); + int i = fuzzy_compare(chunk->data, result); + msr_log(msr, 9, "%d (%s)", i, chunk->data); if (i >= param->threshold) { *error_msg = apr_psprintf(msr->mp, "Fuzzy hash of %s matched " \ - "with %s (from: %s). Score: %d.", var->name, line, + "with %s (from: %s). 
Score: %d.", var->name, chunk->data, param->file, i); - fclose(fp); return 1; } + chunk = chunk->next; } - - fclose(fp); #else *error_msg = apr_psprintf(rule->ruleset->mp, "ModSecurity was not " \ "compiled with ssdeep support."); From 42c819d1b97ab48c90dd35bf2267bc793dca6539 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 6 Apr 2017 13:21:32 -0300 Subject: [PATCH 084/248] Adds information about pull request #1339 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index c36a540cda..833bef0f3d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Reads fuzzy hash databases on init + [Issue #1339 - Robert Paprocki and @Rendername] * Changes the configuration to recognize soap+xml as XML [Issue #1374 - @emphazer and Chaim Sanders] * Fix building with nginx >= 1.11.11 From c1c91e24cde4bd27e1e0ff5dbd6df48956a7731c Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Fri, 9 Dec 2016 10:27:24 +0100 Subject: [PATCH 085/248] {dis|en}able-filename-logging: Option to disable logging of filename in audit log [Issue #1065 - Marc Stern] --- CHANGES | 3 +++ apache2/apache2_util.c | 2 ++ apache2/re.c | 2 ++ configure.ac | 17 ++++++++++++++++- 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 833bef0f3d..9765bb2a2a 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * {dis|en}able-filename-logging: Option to disable logging of filename + in audit log. 
+ [Issue #1065 - Marc Stern] * Reads fuzzy hash databases on init [Issue #1339 - Robert Paprocki and @Rendername] * Changes the configuration to recognize soap+xml as XML diff --git a/apache2/apache2_util.c b/apache2/apache2_util.c index ed5b0ba216..24bba0cee9 100644 --- a/apache2/apache2_util.c +++ b/apache2/apache2_util.c @@ -339,6 +339,7 @@ char *format_error_log_message(apr_pool_t *mp, error_message_t *em) { if (em == NULL) return NULL; +#ifndef LOG_NO_FILENAME if (em->file != NULL) { s_file = apr_psprintf(mp, "[file \"%s\"] ", log_escape(mp, (char *)em->file)); @@ -349,6 +350,7 @@ char *format_error_log_message(apr_pool_t *mp, error_message_t *em) { s_line = apr_psprintf(mp, "[line %d] ", em->line); if (s_line == NULL) return NULL; } +#endif s_level = apr_psprintf(mp, "[level %d] ", em->level); if (s_level == NULL) return NULL; diff --git a/apache2/re.c b/apache2/re.c index 7e0a238c63..0d643ab6fc 100644 --- a/apache2/re.c +++ b/apache2/re.c @@ -2194,10 +2194,12 @@ char *msre_format_metadata(modsec_rec *msr, msre_actionset *actionset) { if (actionset == NULL) return ""; +#ifndef LOG_NO_FILENAME if ((actionset->rule != NULL) && (actionset->rule->filename != NULL)) { fn = apr_psprintf(msr->mp, " [file \"%s\"] [line \"%d\"]", actionset->rule->filename, actionset->rule->line_num); } +#endif if (actionset->id != NULL) { id = apr_psprintf(msr->mp, " [id \"%s\"]", log_escape(msr->mp, actionset->id)); diff --git a/configure.ac b/configure.ac index 4eefad818c..e7e5098afd 100644 --- a/configure.ac +++ b/configure.ac @@ -427,6 +427,21 @@ AC_ARG_ENABLE(rule-id-validation, unique_id='' ]) +# Disable logging of filename +AC_ARG_ENABLE(filename-logging, + AS_HELP_STRING([--enable-filename-logging], + [Enable logging of filename in audit log. 
This is the default]), +[ + if test "$enableval" != "no"; then + log_filename= + else + log_filename="-DLOG_NO_FILENAME" + fi +], +[ + log_filename='' +]) + # Ignore configure errors AC_ARG_ENABLE(errors, AS_HELP_STRING([--disable-errors], @@ -677,7 +692,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id" +MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id $log_filename" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" From 9244cd9824915aa954beb928e7a21ee5f8e32e90 Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Mon, 10 Apr 2017 15:03:09 +0200 Subject: [PATCH 086/248] Option to disable logging of "Server" in audit log when log level < 9. [Issue #1070 - Marc Stern] --- CHANGES | 3 +++ apache2/msc_logging.c | 3 +++ configure.ac | 17 ++++++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 9765bb2a2a..f64d6db5b7 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * {dis|en}able-server-logging: Option to disable logging of + "Server" in audit log when log level < 9. + [Issue #1070 - Marc Stern] * {dis|en}able-filename-logging: Option to disable logging of filename in audit log. 
[Issue #1065 - Marc Stern] diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index e35da5520f..e77ccb855f 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -2004,6 +2004,9 @@ void sec_audit_logger_native(modsec_rec *msr) { sec_auditlog_write_producer_header(msr); /* Server */ +#ifdef LOG_NO_SERVER + if (msr->txcfg->debuglog_level >= 9) +#endif if (msr->server_software != NULL) { text = apr_psprintf(msr->mp, "Server: %s\n", msr->server_software); sec_auditlog_write(msr, text, strlen(text)); diff --git a/configure.ac b/configure.ac index e7e5098afd..4faa3890bd 100644 --- a/configure.ac +++ b/configure.ac @@ -442,6 +442,21 @@ AC_ARG_ENABLE(filename-logging, log_filename='' ]) +# Disable logging of "Server" +AC_ARG_ENABLE(server-logging, + AS_HELP_STRING([--enable-server-logging], + [Enable logging of "Server" in audit log when log level < 9. This is the default]), +[ + if test "$enableval" != "no"; then + log_server= + else + log_server="-DLOG_NO_SERVER" + fi +], +[ + log_server='' +]) + # Ignore configure errors AC_ARG_ENABLE(errors, AS_HELP_STRING([--disable-errors], @@ -692,7 +707,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id $log_filename" +MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id $log_filename $log_server" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" From 99eb07d94428c288ba54338b7050ab072a7cd754 Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Mon, 10 Apr 2017 12:01:17 +0200 Subject: [PATCH 087/248] Fix missing rule id in log See https://github.com/SpiderLabs/ModSecurity/issues/391 --- apache2/re.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/apache2/re.c b/apache2/re.c index 0d643ab6fc..e7014bf8dd 100644 --- a/apache2/re.c +++ b/apache2/re.c @@ -1889,7 +1889,7 @@ static apr_status_t msre_ruleset_process_phase_(msre_ruleset *ruleset, modsec_re } } else if (rc < 0) { - msr_log(msr, 1, "Rule processing failed."); + msr_log(msr, 1, "Rule processing failed (id=%s, msg=%s).", rule->actionset->id, rule->actionset->msg); if (msr->txcfg->reqintercept_oe == 1) { @@ -1920,7 +1920,7 @@ static apr_status_t msre_ruleset_process_phase_(msre_ruleset *ruleset, modsec_re } } else { - msr_log(msr, 1, "Rule processing failed with unknown return code: %d.", rc); + msr_log(msr, 1, "Rule processing failed with unknown return code: %d (id=%s, msg=%s).", rc, rule->actionset->id, rule->actionset->msg); apr_table_clear(msr->matched_vars); return -1; } From 45b7706f1f7a5f2c370bfe08e0fa74b5b8580f9d Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 11 Apr 2017 10:01:30 -0300 Subject: [PATCH 088/248] Adds sanity check before print action message in the logs This is a sanity check on top of #1379 --- apache2/re.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/apache2/re.c b/apache2/re.c index e7014bf8dd..46def9e4d5 100644 --- a/apache2/re.c +++ b/apache2/re.c @@ -1889,8 +1889,11 @@ static apr_status_t msre_ruleset_process_phase_(msre_ruleset *ruleset, modsec_re } } else if (rc < 0) { - msr_log(msr, 1, "Rule processing failed (id=%s, msg=%s).", rule->actionset->id, rule->actionset->msg); - + if (rule->actionset != NULL && rule->actionset->msg != NULL) { + msr_log(msr, 1, "Rule processing failed (id=%s, msg=%s).", rule->actionset->id, rule->actionset->msg); + } else { + msr_log(msr, 1, "Rule processing failed."); + } if (msr->txcfg->reqintercept_oe == 1) { apr_table_clear(msr->matched_vars); @@ -1920,7 +1923,11 @@ static apr_status_t msre_ruleset_process_phase_(msre_ruleset *ruleset, modsec_re } } else { - msr_log(msr, 1, "Rule processing failed with unknown return code: %d (id=%s, 
msg=%s).", rc, rule->actionset->id, rule->actionset->msg); + if (rule->actionset != NULL && rule->actionset->msg != NULL) { + msr_log(msr, 1, "Rule processing failed with unknown return code: %d (id=%s, msg=%s).", rc, rule->actionset->id, rule->actionset->msg); + } else { + msr_log(msr, 1, "Rule processing failed with unknown return code: %d", rc); + } apr_table_clear(msr->matched_vars); return -1; } From 53a8bb2e18bc3f95db8ce69e739be0a7584f2565 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Tue, 11 Apr 2017 11:12:14 -0300 Subject: [PATCH 089/248] Adds information about pull request #1379 --- CHANGES | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES b/CHANGES index f64d6db5b7..622d78edae 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Adds rule id in logs whenever a rule fail. + [Issue #1379, #391 - Marc Stern] * {dis|en}able-server-logging: Option to disable logging of "Server" in audit log when log level < 9. [Issue #1070 - Marc Stern] From d243818affebefec127034f9714eb521d8c0ee6e Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Wed, 26 Apr 2017 14:04:45 -0300 Subject: [PATCH 090/248] {dis|en}able-collection-delete-problem-logging: Option to disable logging of collection delete problem in audit log when log level < 9 in audit log [Issue #576 - Marc Stern] --- CHANGES | 3 +++ apache2/persist_dbm.c | 14 ++++++++++---- configure.ac | 17 ++++++++++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/CHANGES b/CHANGES index 622d78edae..78f0721522 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * {dis|en}able-collection-delete-problem-logging: Option to disable logging of + collection delete problem in audit log when log level < 9. + [Issue #576 - Marc Stern] * Adds rule id in logs whenever a rule fail. 
[Issue #1379, #391 - Marc Stern] * {dis|en}able-server-logging: Option to disable logging of diff --git a/apache2/persist_dbm.c b/apache2/persist_dbm.c index 76c3820baf..7f8b6f104c 100644 --- a/apache2/persist_dbm.c +++ b/apache2/persist_dbm.c @@ -217,10 +217,13 @@ static apr_table_t *collection_retrieve_ex(apr_sdbm_t *existing_dbm, modsec_rec rc = apr_sdbm_delete(dbm, key); if (rc != APR_SUCCESS) { - msr_log(msr, 1, "collection_retrieve_ex: Failed deleting collection (name \"%s\", " +#ifdef LOG_NO_COLL_DELET_PB + if (msr->txcfg->debuglog_level >= 9) +#endif + msr_log(msr, 1, "collection_retrieve_ex: Failed deleting collection (name \"%s\", " "key \"%s\"): %s", log_escape(msr->mp, col_name), log_escape_ex(msr->mp, col_key, col_key_len), get_apr_error(msr->mp, rc)); - msr->msc_sdbm_delete_error = 1; + msr->msc_sdbm_delete_error = 1; goto cleanup; } @@ -678,10 +681,13 @@ int collections_remove_stale(modsec_rec *msr, const char *col_name) { if (expiry_time <= now) { rc = apr_sdbm_delete(dbm, key); if (rc != APR_SUCCESS) { - msr_log(msr, 1, "collections_remove_stale: Failed deleting collection (name \"%s\", " +#ifdef LOG_NO_COLL_DELET_PB + if (msr->txcfg->debuglog_level >= 9) +#endif + msr_log(msr, 1, "collections_remove_stale: Failed deleting collection (name \"%s\", " "key \"%s\"): %s", log_escape(msr->mp, col_name), log_escape_ex(msr->mp, key.dptr, key.dsize - 1), get_apr_error(msr->mp, rc)); - msr->msc_sdbm_delete_error = 1; + msr->msc_sdbm_delete_error = 1; goto error; } diff --git a/configure.ac b/configure.ac index 4faa3890bd..83adeebbaa 100644 --- a/configure.ac +++ b/configure.ac @@ -457,6 +457,21 @@ AC_ARG_ENABLE(server-logging, log_server='' ]) +# Disable logging of problem when deleting collection +AC_ARG_ENABLE(collection-delete-problem-logging, + AS_HELP_STRING([--enable-collection-delete-problem-logging], + [Enable logging of collection delete problem in audit log when log level < 9. 
This is the default]), +[ + if test "$enableval" != "no"; then + log_collection_delete_problem= + else + log_collection_delete_problem="-DLOG_NO_COLL_DELET_PB" + fi +], +[ + log_collection_delete_problem='' +]) + # Ignore configure errors AC_ARG_ENABLE(errors, AS_HELP_STRING([--disable-errors], @@ -707,7 +722,7 @@ else fi fi -MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id $log_filename $log_server" +MODSEC_EXTRA_CFLAGS="$pcre_study $pcre_match_limit $pcre_match_limit_recursion $pcre_jit $request_early $htaccess_config $lua_cache $debug_conf $debug_cache $debug_acmp $debug_mem $perf_meas $modsec_api $cpu_type $unique_id $log_filename $log_server $log_collection_delete_problem" APXS_WRAPPER=build/apxs-wrapper APXS_EXTRA_CFLAGS="" From 67908f45f442d7caff9d9525e8d4077d8e8c8517 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 26 Apr 2017 14:09:48 -0300 Subject: [PATCH 091/248] Cosmetics: fix #1380 indentation --- apache2/persist_dbm.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/apache2/persist_dbm.c b/apache2/persist_dbm.c index 7f8b6f104c..4e891cee84 100644 --- a/apache2/persist_dbm.c +++ b/apache2/persist_dbm.c @@ -218,12 +218,12 @@ static apr_table_t *collection_retrieve_ex(apr_sdbm_t *existing_dbm, modsec_rec rc = apr_sdbm_delete(dbm, key); if (rc != APR_SUCCESS) { #ifdef LOG_NO_COLL_DELET_PB - if (msr->txcfg->debuglog_level >= 9) + if (msr->txcfg->debuglog_level >= 9) #endif - msr_log(msr, 1, "collection_retrieve_ex: Failed deleting collection (name \"%s\", " - "key \"%s\"): %s", log_escape(msr->mp, col_name), - log_escape_ex(msr->mp, col_key, col_key_len), get_apr_error(msr->mp, rc)); - msr->msc_sdbm_delete_error = 1; + msr_log(msr, 1, "collection_retrieve_ex: Failed deleting collection (name \"%s\", " + "key \"%s\"): %s", log_escape(msr->mp, 
col_name), + log_escape_ex(msr->mp, col_key, col_key_len), get_apr_error(msr->mp, rc)); + msr->msc_sdbm_delete_error = 1; goto cleanup; } @@ -682,12 +682,12 @@ int collections_remove_stale(modsec_rec *msr, const char *col_name) { rc = apr_sdbm_delete(dbm, key); if (rc != APR_SUCCESS) { #ifdef LOG_NO_COLL_DELET_PB - if (msr->txcfg->debuglog_level >= 9) + if (msr->txcfg->debuglog_level >= 9) #endif - msr_log(msr, 1, "collections_remove_stale: Failed deleting collection (name \"%s\", " + msr_log(msr, 1, "collections_remove_stale: Failed deleting collection (name \"%s\", " "key \"%s\"): %s", log_escape(msr->mp, col_name), log_escape_ex(msr->mp, key.dptr, key.dsize - 1), get_apr_error(msr->mp, rc)); - msr->msc_sdbm_delete_error = 1; + msr->msc_sdbm_delete_error = 1; goto error; } From 1a5ff4e3718fa757ee5f2d8a51e15da6fdcc019c Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 26 Apr 2017 15:18:24 -0300 Subject: [PATCH 092/248] Fix help message on configuration option added by #1380 --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 83adeebbaa..685a8373d9 100644 --- a/configure.ac +++ b/configure.ac @@ -460,7 +460,7 @@ AC_ARG_ENABLE(server-logging, # Disable logging of problem when deleting collection AC_ARG_ENABLE(collection-delete-problem-logging, AS_HELP_STRING([--enable-collection-delete-problem-logging], - [Enable logging of collection delete problem in audit log when log level < 9. This is the default]), + [Enable logging of collection delete problem even when log level is < 9. 
This is the default]), [ if test "$enableval" != "no"; then log_collection_delete_problem= From f44852b4e011dff25edf947b7101102044877e3e Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 26 Apr 2017 15:57:48 -0300 Subject: [PATCH 093/248] Fix the issue number on Marc's CHANGE log entry --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 78f0721522..0cddedfd38 100644 --- a/CHANGES +++ b/CHANGES @@ -3,7 +3,7 @@ DD MMM YYYY - 2.9.2 - To be released * {dis|en}able-collection-delete-problem-logging: Option to disable logging of collection delete problem in audit log when log level < 9. - [Issue #576 - Marc Stern] + [Issue #1380 - Marc Stern] * Adds rule id in logs whenever a rule fail. [Issue #1379, #391 - Marc Stern] * {dis|en}able-server-logging: Option to disable logging of From d1376c55259517baa1afd11422afa401e45e2136 Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Wed, 26 Apr 2017 16:01:39 -0300 Subject: [PATCH 094/248] Adds option to disable logging of Apache handler in audit log --- CHANGES | 3 +++ apache2/msc_logging.c | 5 ++++- configure.ac | 15 +++++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 0cddedfd38..f255f7866f 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,9 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * {dis|en}able-handler-logging: Option to disable logging of Apache handler + in audit log + [Issue #1070, #1381 - Marc Stern] * {dis|en}able-collection-delete-problem-logging: Option to disable logging of collection delete problem in audit log when log level < 9. 
[Issue #1380 - Marc Stern] diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index e77ccb855f..6be9a0e188 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1974,7 +1974,10 @@ void sec_audit_logger_native(modsec_rec *msr) { } /* Apache-Handler */ - if (msr->r->handler != NULL) { +#ifdef LOG_NO_HANDLER + if (msr->txcfg->debuglog_level >= 9) +#endif + if (msr->r->handler != NULL) { text = apr_psprintf(msr->mp, "Apache-Handler: %s\n", msr->r->handler); sec_auditlog_write(msr, text, strlen(text)); } diff --git a/configure.ac b/configure.ac index 685a8373d9..fe1e2f4de6 100644 --- a/configure.ac +++ b/configure.ac @@ -472,6 +472,21 @@ AC_ARG_ENABLE(collection-delete-problem-logging, log_collection_delete_problem='' ]) +# Disable logging of Apache handler +AC_ARG_ENABLE(handler-logging, + AS_HELP_STRING([--enable-handler-logging], + [Enable logging of Apache handler in audit log when log level < 9. This is the default]), +[ + if test "$enableval" != "no"; then + log_handler= + else + log_handler="-DLOG_NO_HANDLER" + fi +], +[ + log_handler='' +]) + # Ignore configure errors AC_ARG_ENABLE(errors, AS_HELP_STRING([--disable-errors], From 3de0dfc5fd8ef0106020f69ad1035f6d05190ec8 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 26 Apr 2017 16:04:31 -0300 Subject: [PATCH 095/248] Cosmetics: fix #1381 indentation --- apache2/msc_logging.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 6be9a0e188..1e401b8767 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1975,9 +1975,9 @@ void sec_audit_logger_native(modsec_rec *msr) { /* Apache-Handler */ #ifdef LOG_NO_HANDLER - if (msr->txcfg->debuglog_level >= 9) + if (msr->txcfg->debuglog_level >= 9) #endif - if (msr->r->handler != NULL) { + if (msr->r->handler != NULL) { text = apr_psprintf(msr->mp, "Apache-Handler: %s\n", msr->r->handler); sec_auditlog_write(msr, text, strlen(text)); } From 
7b86d8c51d1d93498c179e4a92960326ed693097 Mon Sep 17 00:00:00 2001 From: Marc Stern Date: Wed, 26 Apr 2017 16:38:12 -0300 Subject: [PATCH 096/248] Extends a7731c by adding JSON support --- apache2/msc_logging.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apache2/msc_logging.c b/apache2/msc_logging.c index 1e401b8767..39fcdd719b 100644 --- a/apache2/msc_logging.c +++ b/apache2/msc_logging.c @@ -1156,6 +1156,9 @@ void sec_audit_logger_json(modsec_rec *msr) { } /* Apache-Handler */ +#ifdef LOG_NO_HANDLER + if (msr->txcfg->debuglog_level >= 9) +#endif if (msr->r->handler != NULL) { yajl_kv_string(g, "handler", msr->r->handler); } @@ -2007,9 +2010,6 @@ void sec_audit_logger_native(modsec_rec *msr) { sec_auditlog_write_producer_header(msr); /* Server */ -#ifdef LOG_NO_SERVER - if (msr->txcfg->debuglog_level >= 9) -#endif if (msr->server_software != NULL) { text = apr_psprintf(msr->mp, "Server: %s\n", msr->server_software); sec_auditlog_write(msr, text, strlen(text)); From 2c07a17fa372f50f0ce0daa4640121b429f6ce49 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Wed, 26 Apr 2017 16:47:42 -0300 Subject: [PATCH 097/248] Fix help message on configuration option added by #1381 --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index fe1e2f4de6..fb0fa5d9f6 100644 --- a/configure.ac +++ b/configure.ac @@ -475,7 +475,7 @@ AC_ARG_ENABLE(collection-delete-problem-logging, # Disable logging of Apache handler AC_ARG_ENABLE(handler-logging, AS_HELP_STRING([--enable-handler-logging], - [Enable logging of Apache handler in audit log when log level < 9. This is the default]), + [Enable logging of Apache handler in audit log even when log level is < 9. 
This is the default]), [ if test "$enableval" != "no"; then log_handler= From a4724dfdab9bf6d073d308c16d211294dbd20773 Mon Sep 17 00:00:00 2001 From: Felipe Zimmerle Date: Thu, 27 Apr 2017 18:40:50 -0300 Subject: [PATCH 098/248] Updates the libinjection --- CHANGES | 2 + apache2/libinjection/libinjection.h | 12 +- apache2/libinjection/libinjection_html5.c | 118 +- apache2/libinjection/libinjection_sqli.c | 132 +- apache2/libinjection/libinjection_sqli.h | 28 +- apache2/libinjection/libinjection_sqli_data.h | 1157 +++++++++++------ apache2/libinjection/libinjection_xss.c | 411 +++--- 7 files changed, 1158 insertions(+), 702 deletions(-) diff --git a/CHANGES b/CHANGES index f255f7866f..b1a148e92d 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,8 @@ DD MMM YYYY - 2.9.2 - To be released ------------------------------------ + * Updates libinjection to: da027ab52f9cf14401dd92e34e6683d183bdb3b4 + [ModSecurity team] * {dis|en}able-handler-logging: Option to disable logging of Apache handler in audit log [Issue #1070, #1381 - Marc Stern] diff --git a/apache2/libinjection/libinjection.h b/apache2/libinjection/libinjection.h index 11b14ac5f3..6b40b1df6a 100644 --- a/apache2/libinjection/libinjection.h +++ b/apache2/libinjection/libinjection.h @@ -1,5 +1,5 @@ /** - * Copyright 2012, 2013 Nick Galbreath + * Copyright 2012-2016 Nick Galbreath * nickg@client9.com * BSD License -- see COPYING.txt for details * @@ -7,8 +7,8 @@ * */ -#ifndef _LIBINJECTION_H -#define _LIBINJECTION_H +#ifndef LIBINJECTION_H +#define LIBINJECTION_H #ifdef __cplusplus # define LIBINJECTION_BEGIN_DECLS extern "C" { @@ -49,9 +49,9 @@ const char* libinjection_version(void); */ int libinjection_sqli(const char* s, size_t slen, char fingerprint[]); -/** ALPHA version of xss detector. +/** ALPHA version of xss detector. * - * NOT DONE. + * NOT DONE. 
* * \param[in] s input string, may contain nulls, does not need to be null-terminated * \param[in] slen input string length @@ -62,4 +62,4 @@ int libinjection_xss(const char* s, size_t slen); LIBINJECTION_END_DECLS -#endif /* _LIBINJECTION_H */ +#endif /* LIBINJECTION_H */ diff --git a/apache2/libinjection/libinjection_html5.c b/apache2/libinjection/libinjection_html5.c index 38ef9f0f64..379bb9960d 100644 --- a/apache2/libinjection/libinjection_html5.c +++ b/apache2/libinjection/libinjection_html5.c @@ -71,20 +71,20 @@ void libinjection_h5_init(h5_state_t* hs, const char* s, size_t len, enum html5_ switch (flags) { case DATA_STATE: - hs->state = h5_state_data; - break; + hs->state = h5_state_data; + break; case VALUE_NO_QUOTE: - hs->state = h5_state_before_attribute_name; - break; + hs->state = h5_state_before_attribute_name; + break; case VALUE_SINGLE_QUOTE: - hs->state = h5_state_attribute_value_single_quote; - break; + hs->state = h5_state_attribute_value_single_quote; + break; case VALUE_DOUBLE_QUOTE: - hs->state = h5_state_attribute_value_double_quote; - break; + hs->state = h5_state_attribute_value_double_quote; + break; case VALUE_BACK_QUOTE: - hs->state = h5_state_attribute_value_back_quote; - break; + hs->state = h5_state_attribute_value_back_quote; + break; } } @@ -100,10 +100,18 @@ int libinjection_h5_next(h5_state_t* hs) /** * Everything below here is private * -*/ + */ + static int h5_is_white(char ch) { + /* + * \t = horizontal tab = 0x09 + * \n = newline = 0x0A + * \v = vertical tab = 0x0B + * \f = form feed = 0x0C + * \r = cr = 0x0D + */ return strchr(" \t\n\v\f\r", ch) != NULL; } @@ -112,19 +120,19 @@ static int h5_skip_white(h5_state_t* hs) char ch; while (hs->pos < hs->len) { ch = hs->s[hs->pos]; - switch (ch) { - case 0x00: /* IE only */ - case 0x20: - case 0x09: - case 0x0A: - case 0x0B: /* IE only */ - case 0x0C: + switch (ch) { + case 0x00: /* IE only */ + case 0x20: + case 0x09: + case 0x0A: + case 0x0B: /* IE only */ + case 0x0C: case 0x0D: 
/* IE only */ hs->pos += 1; - break; - default: + break; + default: return ch; - } + } } return CHAR_EOF; } @@ -259,12 +267,12 @@ static int h5_state_tag_name(h5_state_t* hs) pos = hs->pos; while (pos < hs->len) { ch = hs->s[pos]; - if (ch == 0) { - /* special non-standard case */ - /* allow nulls in tag name */ - /* some old browsers apparently allow and ignore them */ - pos += 1; - } else if (h5_is_white(ch)) { + if (ch == 0) { + /* special non-standard case */ + /* allow nulls in tag name */ + /* some old browsers apparently allow and ignore them */ + pos += 1; + } else if (h5_is_white(ch)) { hs->token_start = hs->s + hs->pos; hs->token_len = pos - hs->pos; hs->token_type = TAG_NAME_OPEN; @@ -332,7 +340,7 @@ static int h5_state_before_attribute_name(h5_state_t* hs) default: { return h5_state_attribute_name(hs); } - } + } } static int h5_state_attribute_name(h5_state_t* hs) @@ -450,12 +458,12 @@ static int h5_state_attribute_value_quote(h5_state_t* hs, char qchar) TRACE(); /* skip initial quote in normal case. - * dont do this is pos == 0 since it means we have started + * don't do this "if (pos == 0)" since it means we have started * in a non-data state. 
given an input of '>pos > 0) { - hs->pos += 1; + hs->pos += 1; } @@ -705,10 +713,13 @@ static int h5_state_comment(h5_state_t* hs) char ch; const char* idx; size_t pos; + size_t offset; + const char* end = hs->s + hs->len; TRACE(); pos = hs->pos; while (1) { + idx = (const char*) memchr(hs->s + pos, CHAR_DASH, hs->len - pos); /* did not find anything or has less than 3 chars left */ @@ -719,21 +730,62 @@ static int h5_state_comment(h5_state_t* hs) hs->token_type = TAG_COMMENT; return 1; } - ch = *(idx + 1); + offset = 1; + + /* skip all nulls */ + while (idx + offset < end && *(idx + offset) == 0) { + offset += 1; + } + if (idx + offset == end) { + hs->state = h5_state_eof; + hs->token_start = hs->s + hs->pos; + hs->token_len = hs->len - hs->pos; + hs->token_type = TAG_COMMENT; + return 1; + } + + ch = *(idx + offset); if (ch != CHAR_DASH && ch != CHAR_BANG) { pos = (size_t)(idx - hs->s) + 1; continue; } - ch = *(idx + 2); + + /* need to test */ +#if 0 + /* skip all nulls */ + while (idx + offset < end && *(idx + offset) == 0) { + offset += 1; + } + if (idx + offset == end) { + hs->state = h5_state_eof; + hs->token_start = hs->s + hs->pos; + hs->token_len = hs->len - hs->pos; + hs->token_type = TAG_COMMENT; + return 1; + } +#endif + + offset += 1; + if (idx + offset == end) { + hs->state = h5_state_eof; + hs->token_start = hs->s + hs->pos; + hs->token_len = hs->len - hs->pos; + hs->token_type = TAG_COMMENT; + return 1; + } + + + ch = *(idx + offset); if (ch != CHAR_GT) { pos = (size_t)(idx - hs->s) + 1; continue; } + offset += 1; /* ends in --> or -!> */ hs->token_start = hs->s + hs->pos; hs->token_len = (size_t)(idx - hs->s) - hs->pos; - hs->pos = (size_t)(idx - hs->s) + 3; + hs->pos = (size_t)(idx + offset - hs->s); hs->state = h5_state_data; hs->token_type = TAG_COMMENT; return 1; diff --git a/apache2/libinjection/libinjection_sqli.c b/apache2/libinjection/libinjection_sqli.c index 0b67c5cc49..ed506c651e 100644 --- a/apache2/libinjection/libinjection_sqli.c +++ 
b/apache2/libinjection/libinjection_sqli.c @@ -1,5 +1,5 @@ /** - * Copyright 2012,2013 Nick Galbreath + * Copyright 2012,2016 Nick Galbreath * nickg@client9.com * BSD License -- see COPYING.txt for details * @@ -112,15 +112,11 @@ memchr2(const char *haystack, size_t haystack_len, char c0, char c1) } while (cur < last) { - if (cur[0] == c0) { - if (cur[1] == c1) { - return cur; - } else { - cur += 2; /* (c0 == c1) ? 1 : 2; */ - } - } else { - cur += 1; + /* safe since cur < len - 1 always */ + if (cur[0] == c0 && cur[1] == c1) { + return cur; } + cur += 1; } return NULL; @@ -191,11 +187,11 @@ static int char_is_white(char ch) { /* ' ' space is 0x32 '\t 0x09 \011 horizontal tab '\n' 0x0a \012 new line - '\v' 0x0b \013 verical tab + '\v' 0x0b \013 vertical tab '\f' 0x0c \014 new page '\r' 0x0d \015 carriage return 0x00 \000 null (oracle) - 0xa0 \240 is latin1 + 0xa0 \240 is Latin-1 */ return strchr(" \t\n\v\f\r\240\000", ch) != NULL; } @@ -294,7 +290,7 @@ static void st_clear(stoken_t * st) static void st_assign_char(stoken_t * st, const char stype, size_t pos, size_t len, const char value) { - /* done to elimiate unused warning */ + /* done to eliminate unused warning */ (void)len; st->type = (char) stype; st->pos = pos; @@ -402,7 +398,7 @@ static size_t parse_eol_comment(struct libinjection_sqli_state * sf) } } -/** In Ansi mode, hash is an operator +/** In ANSI mode, hash is an operator * In MYSQL mode, it's a EOL comment like '--' */ static size_t parse_hash(struct libinjection_sqli_state * sf) @@ -842,7 +838,7 @@ static size_t parse_bstring(struct libinjection_sqli_state *sf) /* * hex literal string - * re: [XX]'[0123456789abcdefABCDEF]*' + * re: [xX]'[0123456789abcdefABCDEF]*' * mysql has requirement of having EVEN number of chars, * but pgsql does not */ @@ -1072,7 +1068,7 @@ static size_t parse_money(struct libinjection_sqli_state *sf) /* we have $foobar$ ... 
find it again */ strend = my_memmem(cs+xlen+2, slen - (pos+xlen+2), cs + pos, xlen+2); - if (strend == NULL) { + if (strend == NULL || ((size_t)(strend - cs) < (pos+xlen+2))) { /* fell off edge */ st_assign(sf->current, TYPE_STRING, pos+xlen+2, slen - pos - xlen - 2, cs+pos+xlen+2); sf->current->str_open = '$'; @@ -1104,7 +1100,6 @@ static size_t parse_number(struct libinjection_sqli_state * sf) const char *cs = sf->s; const size_t slen = sf->slen; size_t pos = sf->pos; - int have_dot = 0; int have_e = 0; int have_exp = 0; @@ -1136,7 +1131,6 @@ static size_t parse_number(struct libinjection_sqli_state * sf) } if (pos < slen && cs[pos] == '.') { - have_dot = 1; pos += 1; while (pos < slen && ISDIGIT(cs[pos])) { pos += 1; @@ -1185,7 +1179,7 @@ static size_t parse_number(struct libinjection_sqli_state * sf) } } - if (have_dot == 1 && have_e == 1 && have_exp == 0) { + if (have_e == 1 && have_exp == 0) { /* very special form of * "1234.e" * "10.10E" @@ -1242,29 +1236,13 @@ int libinjection_sqli_tokenize(struct libinjection_sqli_state * sf) const unsigned char ch = (unsigned char) (s[*pos]); /* - * if not ascii, then continue... - * actually probably need to just assuming - * it's a string + * look up the parser, and call it + * + * Porting Note: this is mapping of char to function + * charparsers[ch]() */ - if (ch > 127) { + fnptr = char_parse_map[ch]; - /* 160 or 0xA0 or octal 240 is "latin1 non-breaking space" - * but is treated as a space in mysql. 
- */ - if (ch == 160) { - fnptr = parse_white; - } else { - fnptr = parse_word; - } - } else { - /* - * look up the parser, and call it - * - * Porting Note: this is mapping of char to function - * charparsers[ch]() - */ - fnptr = char_parse_map[ch]; - } *pos = (*fnptr) (sf); /* @@ -1349,16 +1327,22 @@ static int syntax_merge_words(struct libinjection_sqli_state * sf,stoken_t * a, a->type == TYPE_UNION || a->type == TYPE_FUNCTION || a->type == TYPE_EXPRESSION || + a->type == TYPE_TSQL || a->type == TYPE_SQLTYPE)) { - return CHAR_NULL; + return FALSE; } - if (b->type != TYPE_KEYWORD && b->type != TYPE_BAREWORD && - b->type != TYPE_OPERATOR && b->type != TYPE_SQLTYPE && - b->type != TYPE_LOGIC_OPERATOR && - b->type != TYPE_FUNCTION && - b->type != TYPE_UNION && b->type != TYPE_EXPRESSION) { - return CHAR_NULL; + if (! + (b->type == TYPE_KEYWORD || + b->type == TYPE_BAREWORD || + b->type == TYPE_OPERATOR || + b->type == TYPE_UNION || + b->type == TYPE_FUNCTION || + b->type == TYPE_EXPRESSION || + b->type == TYPE_TSQL || + b->type == TYPE_SQLTYPE || + b->type == TYPE_LOGIC_OPERATOR)) { + return FALSE; } sz1 = a->len; @@ -1374,7 +1358,6 @@ static int syntax_merge_words(struct libinjection_sqli_state * sf,stoken_t * a, tmp[sz1] = ' '; memcpy(tmp + sz1 + 1, b->val, sz2); tmp[sz3] = CHAR_NULL; - ch = sf->lookup(sf, LOOKUP_WORD, tmp, sz3); if (ch != CHAR_NULL) { @@ -1450,6 +1433,13 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sf) sf->tokenvec[2].type == TYPE_COMMA && sf->tokenvec[3].type == TYPE_LEFTPARENS && sf->tokenvec[4].type == TYPE_NUMBER + ) || + ( + sf->tokenvec[0].type == TYPE_BAREWORD && + sf->tokenvec[1].type == TYPE_RIGHTPARENS && + sf->tokenvec[2].type == TYPE_OPERATOR && + sf->tokenvec[3].type == TYPE_LEFTPARENS && + sf->tokenvec[4].type == TYPE_BAREWORD ) ) { @@ -1541,7 +1531,7 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sf) continue; } else if ((sf->tokenvec[left].type == TYPE_BAREWORD || sf->tokenvec[left].type == 
TYPE_VARIABLE) && sf->tokenvec[left+1].type == TYPE_LEFTPARENS && ( - /* TSQL functions but common enough to be collumn names */ + /* TSQL functions but common enough to be column names */ cstrcasecmp("USER_ID", sf->tokenvec[left].val, sf->tokenvec[left].len) == 0 || cstrcasecmp("USER_NAME", sf->tokenvec[left].val, sf->tokenvec[left].len) == 0 || @@ -1564,7 +1554,7 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sf) /* pos is the same * other conversions need to go here... for instance - * password CAN be a function, coalese CAN be a function + * password CAN be a function, coalesce CAN be a function */ sf->tokenvec[left].type = TYPE_FUNCTION; continue; @@ -1828,7 +1818,7 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sf) * 1,-sin(1) --> 1 (1) * Here, just do * 1,-sin(1) --> 1,sin(1) - * just remove unary opartor + * just remove unary operator */ st_copy(&sf->tokenvec[left+1], &sf->tokenvec[left+2]); pos -= 1; @@ -1852,9 +1842,21 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sf) pos -= 1; left = 0; continue; + } else if ((sf->tokenvec[left].type == TYPE_FUNCTION) && + (sf->tokenvec[left+1].type == TYPE_LEFTPARENS) && + (sf->tokenvec[left+2].type != TYPE_RIGHTPARENS)) { + /* + * whats going on here + * Some SQL functions like USER() have 0 args + * if we get User(foo), then User is not a function + * This should be expanded since it eliminated a lot of false + * positives. 
+ */ + if (cstrcasecmp("USER", sf->tokenvec[left].val, sf->tokenvec[left].len) == 0) { + sf->tokenvec[left].type = TYPE_BAREWORD; + } } - /* no folding -- assume left-most token is is good, now use the existing 2 tokens -- do not get another @@ -2019,7 +2021,7 @@ int libinjection_sqli_blacklist(struct libinjection_sqli_state* sql_state) } /* - * return TRUE if sqli, false is benign + * return TRUE if SQLi, false is benign */ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) { @@ -2033,10 +2035,10 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) if (tlen > 1 && sql_state->fingerprint[tlen-1] == TYPE_COMMENT) { /* - * if ending comment is contains 'sp_password' then it's sqli! + * if ending comment is contains 'sp_password' then it's SQLi! * MS Audit log apparently ignores anything with - * 'sp_password' in it. Unable to find primary refernece to - * this "feature" of SQL Server but seems to be known sqli + * 'sp_password' in it. Unable to find primary reference to + * this "feature" of SQL Server but seems to be known SQLi * technique */ if (my_memmem(sql_state->s, sql_state->slen, @@ -2055,7 +2057,7 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) if (sql_state->fingerprint[1] == TYPE_UNION) { if (sql_state->stats_tokens == 2) { - /* not sure why but 1U comes up in Sqli attack + /* not sure why but 1U comes up in SQLi attack * likely part of parameter splitting/etc. * lots of reasons why "1 union" might be normal * input, so beep only if other SQLi things are present @@ -2080,7 +2082,7 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) /* * for fingerprint like 'nc', only comments of /x are treated - * as SQL... ending comments of "--" and "#" are not sqli + * as SQL... 
ending comments of "--" and "#" are not SQLi */ if (sql_state->tokenvec[0].type == TYPE_BAREWORD && sql_state->tokenvec[1].type == TYPE_COMMENT && @@ -2090,7 +2092,7 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) } /* - * if '1c' ends with '/x' then it's sqli + * if '1c' ends with '/x' then it's SQLi */ if (sql_state->tokenvec[0].type == TYPE_NUMBER && sql_state->tokenvec[1].type == TYPE_COMMENT && @@ -2113,13 +2115,13 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) if (sql_state->tokenvec[0].type == TYPE_NUMBER && sql_state->tokenvec[1].type == TYPE_COMMENT) { if (sql_state->stats_tokens > 2) { - /* we have some folding going on, highly likely sqli */ + /* we have some folding going on, highly likely SQLi */ sql_state->reason = __LINE__; return TRUE; } /* * we check that next character after the number is either whitespace, - * or '/' or a '-' ==> sqli. + * or '/' or a '-' ==> SQLi. */ ch = sql_state->s[sql_state->tokenvec[0].len]; if ( ch <= 32 ) { @@ -2141,7 +2143,7 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) } /* - * detect obvious sqli scans.. many people put '--' in plain text + * detect obvious SQLi scans.. many people put '--' in plain text * so only detect if input ends with '--', e.g. 
1-- but not 1-- foo */ if ((sql_state->tokenvec[1].len > 2) @@ -2177,7 +2179,7 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) } /* - * not sqli + * not SQLi */ sql_state->reason = __LINE__; return FALSE; @@ -2186,8 +2188,8 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state* sql_state) streq(sql_state->fingerprint, "1&1") || streq(sql_state->fingerprint, "1&v") || streq(sql_state->fingerprint, "1&s")) { - /* 'sexy and 17' not sqli - * 'sexy and 17<18' sqli + /* 'sexy and 17' not SQLi + * 'sexy and 17<18' SQLi */ if (sql_state->stats_tokens == 3) { sql_state->reason = __LINE__; @@ -2243,7 +2245,7 @@ int libinjection_is_sqli(struct libinjection_sqli_state * sql_state) size_t slen = sql_state->slen; /* - * no input? not sqli + * no input? not SQLi */ if (slen == 0) { return FALSE; diff --git a/apache2/libinjection/libinjection_sqli.h b/apache2/libinjection/libinjection_sqli.h index 4f16db8dbf..749f7a44a3 100644 --- a/apache2/libinjection/libinjection_sqli.h +++ b/apache2/libinjection/libinjection_sqli.h @@ -1,14 +1,14 @@ /** - * Copyright 2012, 2013 Nick Galbreath + * Copyright 2012-2016 Nick Galbreath * nickg@client9.com - * BSD License -- see COPYING.txt for details + * BSD License -- see `COPYING.txt` for details * * https://libinjection.client9.com/ * */ -#ifndef _LIBINJECTION_SQLI_H -#define _LIBINJECTION_SQLI_H +#ifndef LIBINJECTION_SQLI_H +#define LIBINJECTION_SQLI_H #ifdef __cplusplus extern "C" { @@ -53,7 +53,7 @@ struct libinjection_sqli_token { /* count: * in type 'v', used for number of opening '@' - * but maybe unsed in other contexts + * but maybe used in other contexts */ int count; @@ -63,7 +63,7 @@ struct libinjection_sqli_token { typedef struct libinjection_sqli_token stoken_t; /** - * Pointer to function, takes cstr input, + * Pointer to function, takes c-string input, * returns '\0' for no match, else a char */ struct libinjection_sqli_state; @@ -97,7 +97,7 @@ struct libinjection_sqli_state { int 
flags; /* - * pos is index in string we are at when tokenizing + * pos is the index in the string during tokenization */ size_t pos; @@ -118,7 +118,7 @@ struct libinjection_sqli_state { /* * fingerprint pattern c-string * +1 for ending null - * Mimimum of 8 bytes to add gcc's -fstack-protector to work + * Minimum of 8 bytes to add gcc's -fstack-protector to work */ char fingerprint[8]; @@ -156,7 +156,7 @@ struct libinjection_sqli_state { */ int stats_comment_c; - /* '#' operators or mysql EOL comments found + /* '#' operators or MySQL EOL comments found * */ int stats_comment_hash; @@ -208,8 +208,8 @@ void libinjection_sqli_init(struct libinjection_sqli_state* sql_state, */ int libinjection_is_sqli(struct libinjection_sqli_state* sql_state); -/* FOR H@CKERS ONLY - * +/* FOR HACKERS ONLY + * provides deep hooks into the decision making process */ void libinjection_sqli_callback(struct libinjection_sqli_state* sql_state, ptr_lookup_fn fn, @@ -269,7 +269,7 @@ int libinjection_sqli_fold(struct libinjection_sqli_state * sql_state); * two functions. With this, you over-ride one part or the other. * * return libinjection_sqli_blacklist(sql_state) && - * libinject_sqli_not_whitelist(sql_state); + * libinjection_sqli_not_whitelist(sql_state); * * \param sql_state should be filled out after libinjection_sqli_fingerprint is called */ @@ -284,7 +284,7 @@ int libinjection_sqli_blacklist(struct libinjection_sqli_state* sql_state); /* Given a positive match for a pattern (i.e. pattern is SQLi), this function * does additional analysis to reduce false positives. 
* - * \return TRUE if sqli, false otherwise + * \return TRUE if SQLi, false otherwise */ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state * sql_state); @@ -292,4 +292,4 @@ int libinjection_sqli_not_whitelist(struct libinjection_sqli_state * sql_state); } #endif -#endif /* _LIBINJECTION_SQLI_H */ +#endif /* LIBINJECTION_SQLI_H */ diff --git a/apache2/libinjection/libinjection_sqli_data.h b/apache2/libinjection/libinjection_sqli_data.h index 8f3a2e0e23..14b969e3de 100644 --- a/apache2/libinjection/libinjection_sqli_data.h +++ b/apache2/libinjection/libinjection_sqli_data.h @@ -1,6 +1,6 @@ -#ifndef _LIBINJECTION_SQLI_DATA_H -#define _LIBINJECTION_SQLI_DATA_H +#ifndef LIBINJECTION_SQLI_DATA_H +#define LIBINJECTION_SQLI_DATA_H #include "libinjection.h" #include "libinjection_sqli.h" @@ -164,6 +164,134 @@ static const pt2Function char_parse_map[] = { &parse_char, /* 125 */ &parse_operator1, /* 126 */ &parse_white, /* 127 */ + &parse_word, /* 128 */ + &parse_word, /* 129 */ + &parse_word, /* 130 */ + &parse_word, /* 131 */ + &parse_word, /* 132 */ + &parse_word, /* 133 */ + &parse_word, /* 134 */ + &parse_word, /* 135 */ + &parse_word, /* 136 */ + &parse_word, /* 137 */ + &parse_word, /* 138 */ + &parse_word, /* 139 */ + &parse_word, /* 140 */ + &parse_word, /* 141 */ + &parse_word, /* 142 */ + &parse_word, /* 143 */ + &parse_word, /* 144 */ + &parse_word, /* 145 */ + &parse_word, /* 146 */ + &parse_word, /* 147 */ + &parse_word, /* 148 */ + &parse_word, /* 149 */ + &parse_word, /* 150 */ + &parse_word, /* 151 */ + &parse_word, /* 152 */ + &parse_word, /* 153 */ + &parse_word, /* 154 */ + &parse_word, /* 155 */ + &parse_word, /* 156 */ + &parse_word, /* 157 */ + &parse_word, /* 158 */ + &parse_word, /* 159 */ + &parse_white, /* 160 */ + &parse_word, /* 161 */ + &parse_word, /* 162 */ + &parse_word, /* 163 */ + &parse_word, /* 164 */ + &parse_word, /* 165 */ + &parse_word, /* 166 */ + &parse_word, /* 167 */ + &parse_word, /* 168 */ + &parse_word, /* 169 
*/ + &parse_word, /* 170 */ + &parse_word, /* 171 */ + &parse_word, /* 172 */ + &parse_word, /* 173 */ + &parse_word, /* 174 */ + &parse_word, /* 175 */ + &parse_word, /* 176 */ + &parse_word, /* 177 */ + &parse_word, /* 178 */ + &parse_word, /* 179 */ + &parse_word, /* 180 */ + &parse_word, /* 181 */ + &parse_word, /* 182 */ + &parse_word, /* 183 */ + &parse_word, /* 184 */ + &parse_word, /* 185 */ + &parse_word, /* 186 */ + &parse_word, /* 187 */ + &parse_word, /* 188 */ + &parse_word, /* 189 */ + &parse_word, /* 190 */ + &parse_word, /* 191 */ + &parse_word, /* 192 */ + &parse_word, /* 193 */ + &parse_word, /* 194 */ + &parse_word, /* 195 */ + &parse_word, /* 196 */ + &parse_word, /* 197 */ + &parse_word, /* 198 */ + &parse_word, /* 199 */ + &parse_word, /* 200 */ + &parse_word, /* 201 */ + &parse_word, /* 202 */ + &parse_word, /* 203 */ + &parse_word, /* 204 */ + &parse_word, /* 205 */ + &parse_word, /* 206 */ + &parse_word, /* 207 */ + &parse_word, /* 208 */ + &parse_word, /* 209 */ + &parse_word, /* 210 */ + &parse_word, /* 211 */ + &parse_word, /* 212 */ + &parse_word, /* 213 */ + &parse_word, /* 214 */ + &parse_word, /* 215 */ + &parse_word, /* 216 */ + &parse_word, /* 217 */ + &parse_word, /* 218 */ + &parse_word, /* 219 */ + &parse_word, /* 220 */ + &parse_word, /* 221 */ + &parse_word, /* 222 */ + &parse_word, /* 223 */ + &parse_word, /* 224 */ + &parse_word, /* 225 */ + &parse_word, /* 226 */ + &parse_word, /* 227 */ + &parse_word, /* 228 */ + &parse_word, /* 229 */ + &parse_word, /* 230 */ + &parse_word, /* 231 */ + &parse_word, /* 232 */ + &parse_word, /* 233 */ + &parse_word, /* 234 */ + &parse_word, /* 235 */ + &parse_word, /* 236 */ + &parse_word, /* 237 */ + &parse_word, /* 238 */ + &parse_word, /* 239 */ + &parse_word, /* 240 */ + &parse_word, /* 241 */ + &parse_word, /* 242 */ + &parse_word, /* 243 */ + &parse_word, /* 244 */ + &parse_word, /* 245 */ + &parse_word, /* 246 */ + &parse_word, /* 247 */ + &parse_word, /* 248 */ + &parse_word, /* 249 
*/ + &parse_word, /* 250 */ + &parse_word, /* 251 */ + &parse_word, /* 252 */ + &parse_word, /* 253 */ + &parse_word, /* 254 */ + &parse_word, /* 255 */ }; static const keyword_t sql_keywords[] = { @@ -395,7 +523,6 @@ static const keyword_t sql_keywords[] = { {"01&1KV", 'F'}, {"01&1O(", 'F'}, {"01&1OF", 'F'}, - {"01&1OO", 'F'}, {"01&1OS", 'F'}, {"01&1OV", 'F'}, {"01&1TN", 'F'}, @@ -522,7 +649,6 @@ static const keyword_t sql_keywords[] = { {"01&S1", 'F'}, {"01&S1;", 'F'}, {"01&S1C", 'F'}, - {"01&S1O", 'F'}, {"01&S;", 'F'}, {"01&S;C", 'F'}, {"01&S;E", 'F'}, @@ -547,7 +673,6 @@ static const keyword_t sql_keywords[] = { {"01&SO1", 'F'}, {"01&SOF", 'F'}, {"01&SON", 'F'}, - {"01&SOO", 'F'}, {"01&SOS", 'F'}, {"01&SOV", 'F'}, {"01&STN", 'F'}, @@ -593,7 +718,6 @@ static const keyword_t sql_keywords[] = { {"01&VKV", 'F'}, {"01&VO(", 'F'}, {"01&VOF", 'F'}, - {"01&VOO", 'F'}, {"01&VOS", 'F'}, {"01&VS", 'F'}, {"01&VS;", 'F'}, @@ -730,6 +854,7 @@ static const keyword_t sql_keywords[] = { {"01)ESO", 'F'}, {"01)EVC", 'F'}, {"01)EVO", 'F'}, + {"01)F(F", 'F'}, {"01)K(1", 'F'}, {"01)K(F", 'F'}, {"01)K(N", 'F'}, @@ -882,6 +1007,7 @@ static const keyword_t sql_keywords[] = { {"01;T(N", 'F'}, {"01;T(S", 'F'}, {"01;T(V", 'F'}, + {"01;T1(", 'F'}, {"01;T1,", 'F'}, {"01;T1;", 'F'}, {"01;T1C", 'F'}, @@ -913,6 +1039,7 @@ static const keyword_t sql_keywords[] = { {"01;TNT", 'F'}, {"01;TNV", 'F'}, {"01;TO(", 'F'}, + {"01;TS(", 'F'}, {"01;TS,", 'F'}, {"01;TS;", 'F'}, {"01;TSC", 'F'}, @@ -920,12 +1047,8 @@ static const keyword_t sql_keywords[] = { {"01;TSK", 'F'}, {"01;TSO", 'F'}, {"01;TST", 'F'}, - {"01;TT(", 'F'}, - {"01;TT1", 'F'}, - {"01;TTF", 'F'}, {"01;TTN", 'F'}, - {"01;TTS", 'F'}, - {"01;TTV", 'F'}, + {"01;TV(", 'F'}, {"01;TV,", 'F'}, {"01;TV;", 'F'}, {"01;TVC", 'F'}, @@ -967,7 +1090,6 @@ static const keyword_t sql_keywords[] = { {"01B(1)", 'F'}, {"01B(1O", 'F'}, {"01B(F(", 'F'}, - {"01B(N)", 'F'}, {"01B(NO", 'F'}, {"01B(S)", 'F'}, {"01B(SO", 'F'}, @@ -1116,11 +1238,18 @@ static const 
keyword_t sql_keywords[] = { {"01E(SO", 'F'}, {"01E(V)", 'F'}, {"01E(VO", 'F'}, + {"01E1;T", 'F'}, {"01E1C", 'F'}, {"01E1O(", 'F'}, {"01E1OF", 'F'}, {"01E1OS", 'F'}, {"01E1OV", 'F'}, + {"01E1T(", 'F'}, + {"01E1T1", 'F'}, + {"01E1TF", 'F'}, + {"01E1TN", 'F'}, + {"01E1TS", 'F'}, + {"01E1TV", 'F'}, {"01E1UE", 'F'}, {"01EF()", 'F'}, {"01EF(1", 'F'}, @@ -1134,35 +1263,50 @@ static const keyword_t sql_keywords[] = { {"01EK(N", 'F'}, {"01EK(S", 'F'}, {"01EK(V", 'F'}, + {"01EK1;", 'F'}, {"01EK1C", 'F'}, {"01EK1O", 'F'}, + {"01EK1T", 'F'}, {"01EK1U", 'F'}, {"01EKF(", 'F'}, + {"01EKN;", 'F'}, {"01EKNC", 'F'}, {"01EKNE", 'F'}, + {"01EKNT", 'F'}, {"01EKNU", 'F'}, {"01EKOK", 'F'}, + {"01EKS;", 'F'}, {"01EKSC", 'F'}, {"01EKSO", 'F'}, + {"01EKST", 'F'}, {"01EKSU", 'F'}, {"01EKU(", 'F'}, {"01EKU1", 'F'}, {"01EKUE", 'F'}, {"01EKUF", 'F'}, - {"01EKUN", 'F'}, {"01EKUS", 'F'}, {"01EKUV", 'F'}, + {"01EKV;", 'F'}, {"01EKVC", 'F'}, {"01EKVO", 'F'}, + {"01EKVT", 'F'}, {"01EKVU", 'F'}, + {"01EN;T", 'F'}, {"01ENC", 'F'}, {"01ENEN", 'F'}, {"01ENO(", 'F'}, {"01ENOF", 'F'}, {"01ENOS", 'F'}, {"01ENOV", 'F'}, + {"01ENT(", 'F'}, + {"01ENT1", 'F'}, + {"01ENTF", 'F'}, + {"01ENTN", 'F'}, + {"01ENTS", 'F'}, + {"01ENTV", 'F'}, {"01ENUE", 'F'}, {"01EOKN", 'F'}, + {"01ES;T", 'F'}, {"01ESC", 'F'}, {"01ESO(", 'F'}, {"01ESO1", 'F'}, @@ -1170,6 +1314,12 @@ static const keyword_t sql_keywords[] = { {"01ESON", 'F'}, {"01ESOS", 'F'}, {"01ESOV", 'F'}, + {"01EST(", 'F'}, + {"01EST1", 'F'}, + {"01ESTF", 'F'}, + {"01ESTN", 'F'}, + {"01ESTS", 'F'}, + {"01ESTV", 'F'}, {"01ESUE", 'F'}, {"01EU(1", 'F'}, {"01EU(F", 'F'}, @@ -1182,19 +1332,23 @@ static const keyword_t sql_keywords[] = { {"01EUEF", 'F'}, {"01EUEK", 'F'}, {"01EUF(", 'F'}, - {"01EUN,", 'F'}, - {"01EUNC", 'F'}, - {"01EUNO", 'F'}, {"01EUS,", 'F'}, {"01EUSC", 'F'}, {"01EUSO", 'F'}, {"01EUV,", 'F'}, {"01EUVC", 'F'}, {"01EUVO", 'F'}, + {"01EV;T", 'F'}, {"01EVC", 'F'}, {"01EVO(", 'F'}, {"01EVOF", 'F'}, {"01EVOS", 'F'}, + {"01EVT(", 'F'}, + {"01EVT1", 'F'}, + 
{"01EVTF", 'F'}, + {"01EVTN", 'F'}, + {"01EVTS", 'F'}, + {"01EVTV", 'F'}, {"01EVUE", 'F'}, {"01F()1", 'F'}, {"01F()F", 'F'}, @@ -1251,6 +1405,8 @@ static const keyword_t sql_keywords[] = { {"01K)EN", 'F'}, {"01K)ES", 'F'}, {"01K)EV", 'F'}, + {"01K)F(", 'F'}, + {"01K)O(", 'F'}, {"01K)OF", 'F'}, {"01K)UE", 'F'}, {"01K1", 'F'}, @@ -1387,7 +1543,6 @@ static const keyword_t sql_keywords[] = { {"01KVU(", 'F'}, {"01KVUE", 'F'}, {"01N&F(", 'F'}, - {"01N(1)", 'F'}, {"01N(1O", 'F'}, {"01N(F(", 'F'}, {"01N(S)", 'F'}, @@ -1410,12 +1565,6 @@ static const keyword_t sql_keywords[] = { {"01NESO", 'F'}, {"01NEVC", 'F'}, {"01NEVO", 'F'}, - {"01NF()", 'F'}, - {"01NF(1", 'F'}, - {"01NF(F", 'F'}, - {"01NF(N", 'F'}, - {"01NF(S", 'F'}, - {"01NF(V", 'F'}, {"01NU(E", 'F'}, {"01NUE", 'F'}, {"01NUE(", 'F'}, @@ -1437,6 +1586,7 @@ static const keyword_t sql_keywords[] = { {"01O(EF", 'F'}, {"01O(EK", 'F'}, {"01O(EN", 'F'}, + {"01O(EO", 'F'}, {"01O(ES", 'F'}, {"01O(EV", 'F'}, {"01O(F(", 'F'}, @@ -1502,6 +1652,7 @@ static const keyword_t sql_keywords[] = { {"01OS)B", 'F'}, {"01OS)C", 'F'}, {"01OS)E", 'F'}, + {"01OS)F", 'F'}, {"01OS)K", 'F'}, {"01OS)O", 'F'}, {"01OS)U", 'F'}, @@ -1550,6 +1701,14 @@ static const keyword_t sql_keywords[] = { {"01OSKS", 'F'}, {"01OSKU", 'F'}, {"01OSKV", 'F'}, + {"01OST(", 'F'}, + {"01OST1", 'F'}, + {"01OSTE", 'F'}, + {"01OSTF", 'F'}, + {"01OSTN", 'F'}, + {"01OSTS", 'F'}, + {"01OSTT", 'F'}, + {"01OSTV", 'F'}, {"01OSU", 'F'}, {"01OSU(", 'F'}, {"01OSU1", 'F'}, @@ -1558,7 +1717,6 @@ static const keyword_t sql_keywords[] = { {"01OSUE", 'F'}, {"01OSUF", 'F'}, {"01OSUK", 'F'}, - {"01OSUN", 'F'}, {"01OSUO", 'F'}, {"01OSUS", 'F'}, {"01OSUT", 'F'}, @@ -1589,6 +1747,7 @@ static const keyword_t sql_keywords[] = { {"01OV)B", 'F'}, {"01OV)C", 'F'}, {"01OV)E", 'F'}, + {"01OV)F", 'F'}, {"01OV)K", 'F'}, {"01OV)O", 'F'}, {"01OV)U", 'F'}, @@ -1642,6 +1801,14 @@ static const keyword_t sql_keywords[] = { {"01OVSO", 'F'}, {"01OVSU", 'F'}, {"01OVSV", 'F'}, + {"01OVT(", 'F'}, + {"01OVT1", 
'F'}, + {"01OVTE", 'F'}, + {"01OVTF", 'F'}, + {"01OVTN", 'F'}, + {"01OVTS", 'F'}, + {"01OVTT", 'F'}, + {"01OVTV", 'F'}, {"01OVU", 'F'}, {"01OVU(", 'F'}, {"01OVU1", 'F'}, @@ -1650,7 +1817,6 @@ static const keyword_t sql_keywords[] = { {"01OVUE", 'F'}, {"01OVUF", 'F'}, {"01OVUK", 'F'}, - {"01OVUN", 'F'}, {"01OVUO", 'F'}, {"01OVUS", 'F'}, {"01OVUT", 'F'}, @@ -1672,6 +1838,96 @@ static const keyword_t sql_keywords[] = { {"01SVO(", 'F'}, {"01SVOF", 'F'}, {"01SVOS", 'F'}, + {"01T(1)", 'F'}, + {"01T(1O", 'F'}, + {"01T(F(", 'F'}, + {"01T(N)", 'F'}, + {"01T(NO", 'F'}, + {"01T(S)", 'F'}, + {"01T(SO", 'F'}, + {"01T(V)", 'F'}, + {"01T(VO", 'F'}, + {"01T1(F", 'F'}, + {"01T1O(", 'F'}, + {"01T1OF", 'F'}, + {"01T1OS", 'F'}, + {"01T1OV", 'F'}, + {"01TE(1", 'F'}, + {"01TE(F", 'F'}, + {"01TE(N", 'F'}, + {"01TE(S", 'F'}, + {"01TE(V", 'F'}, + {"01TE1N", 'F'}, + {"01TE1O", 'F'}, + {"01TEF(", 'F'}, + {"01TEK(", 'F'}, + {"01TEK1", 'F'}, + {"01TEKF", 'F'}, + {"01TEKN", 'F'}, + {"01TEKS", 'F'}, + {"01TEKV", 'F'}, + {"01TENN", 'F'}, + {"01TENO", 'F'}, + {"01TESN", 'F'}, + {"01TESO", 'F'}, + {"01TEVN", 'F'}, + {"01TEVO", 'F'}, + {"01TF()", 'F'}, + {"01TF(1", 'F'}, + {"01TF(F", 'F'}, + {"01TF(N", 'F'}, + {"01TF(S", 'F'}, + {"01TF(V", 'F'}, + {"01TN(1", 'F'}, + {"01TN(F", 'F'}, + {"01TN(S", 'F'}, + {"01TN(V", 'F'}, + {"01TN1C", 'F'}, + {"01TN1O", 'F'}, + {"01TN;E", 'F'}, + {"01TN;N", 'F'}, + {"01TN;T", 'F'}, + {"01TNE(", 'F'}, + {"01TNE1", 'F'}, + {"01TNEF", 'F'}, + {"01TNEN", 'F'}, + {"01TNES", 'F'}, + {"01TNEV", 'F'}, + {"01TNF(", 'F'}, + {"01TNKN", 'F'}, + {"01TNN:", 'F'}, + {"01TNNC", 'F'}, + {"01TNNO", 'F'}, + {"01TNO(", 'F'}, + {"01TNOF", 'F'}, + {"01TNOS", 'F'}, + {"01TNOV", 'F'}, + {"01TNSC", 'F'}, + {"01TNSO", 'F'}, + {"01TNT(", 'F'}, + {"01TNT1", 'F'}, + {"01TNTF", 'F'}, + {"01TNTN", 'F'}, + {"01TNTS", 'F'}, + {"01TNTV", 'F'}, + {"01TNVC", 'F'}, + {"01TNVO", 'F'}, + {"01TS(F", 'F'}, + {"01TSO(", 'F'}, + {"01TSO1", 'F'}, + {"01TSOF", 'F'}, + {"01TSON", 'F'}, + {"01TSOS", 'F'}, + 
{"01TSOV", 'F'}, + {"01TTNE", 'F'}, + {"01TTNK", 'F'}, + {"01TTNN", 'F'}, + {"01TTNT", 'F'}, + {"01TV(1", 'F'}, + {"01TV(F", 'F'}, + {"01TVO(", 'F'}, + {"01TVOF", 'F'}, + {"01TVOS", 'F'}, {"01U", 'F'}, {"01U(1)", 'F'}, {"01U(1O", 'F'}, @@ -1757,7 +2013,6 @@ static const keyword_t sql_keywords[] = { {"01UENU", 'F'}, {"01UEOK", 'F'}, {"01UEON", 'F'}, - {"01UEOO", 'F'}, {"01UES", 'F'}, {"01UES&", 'F'}, {"01UES(", 'F'}, @@ -1793,30 +2048,6 @@ static const keyword_t sql_keywords[] = { {"01UF(S", 'F'}, {"01UF(V", 'F'}, {"01UK(E", 'F'}, - {"01UN(1", 'F'}, - {"01UN(F", 'F'}, - {"01UN(S", 'F'}, - {"01UN(V", 'F'}, - {"01UN,(", 'F'}, - {"01UN,F", 'F'}, - {"01UN1(", 'F'}, - {"01UN1,", 'F'}, - {"01UN1O", 'F'}, - {"01UNC", 'F'}, - {"01UNE(", 'F'}, - {"01UNE1", 'F'}, - {"01UNEF", 'F'}, - {"01UNEN", 'F'}, - {"01UNES", 'F'}, - {"01UNEV", 'F'}, - {"01UNF(", 'F'}, - {"01UNO(", 'F'}, - {"01UNOF", 'F'}, - {"01UNOS", 'F'}, - {"01UNOV", 'F'}, - {"01UNS(", 'F'}, - {"01UNS,", 'F'}, - {"01UNSO", 'F'}, {"01UO(E", 'F'}, {"01UON(", 'F'}, {"01UON1", 'F'}, @@ -1834,7 +2065,9 @@ static const keyword_t sql_keywords[] = { {"01UTN(", 'F'}, {"01UTN1", 'F'}, {"01UTNF", 'F'}, + {"01UTNN", 'F'}, {"01UTNS", 'F'}, + {"01UTNV", 'F'}, {"01UV,(", 'F'}, {"01UV,F", 'F'}, {"01UVC", 'F'}, @@ -1986,7 +2219,6 @@ static const keyword_t sql_keywords[] = { {"0E(S)V", 'F'}, {"0E(S,F", 'F'}, {"0E(S1)", 'F'}, - {"0E(S1O", 'F'}, {"0E(SF(", 'F'}, {"0E(SO(", 'F'}, {"0E(SO1", 'F'}, @@ -2270,7 +2502,6 @@ static const keyword_t sql_keywords[] = { {"0EK1N)", 'F'}, {"0EK1N;", 'F'}, {"0EK1NC", 'F'}, - {"0EK1NF", 'F'}, {"0EK1NK", 'F'}, {"0EK1O(", 'F'}, {"0EK1OF", 'F'}, @@ -2321,7 +2552,6 @@ static const keyword_t sql_keywords[] = { {"0EKN1", 'F'}, {"0EKN1;", 'F'}, {"0EKN1C", 'F'}, - {"0EKN1F", 'F'}, {"0EKN1K", 'F'}, {"0EKN1O", 'F'}, {"0EKN;(", 'F'}, @@ -2367,7 +2597,6 @@ static const keyword_t sql_keywords[] = { {"0EKS1C", 'F'}, {"0EKS1F", 'F'}, {"0EKS1K", 'F'}, - {"0EKS1O", 'F'}, {"0EKS;(", 'F'}, {"0EKSB(", 'F'}, {"0EKSB1", 
'F'}, @@ -2486,7 +2715,6 @@ static const keyword_t sql_keywords[] = { {"0EN,F(", 'F'}, {"0EN1;", 'F'}, {"0EN1;C", 'F'}, - {"0EN1C", 'F'}, {"0EN1O(", 'F'}, {"0EN1OF", 'F'}, {"0EN1OS", 'F'}, @@ -2620,10 +2848,6 @@ static const keyword_t sql_keywords[] = { {"0ES1;", 'F'}, {"0ES1;C", 'F'}, {"0ES1C", 'F'}, - {"0ES1O(", 'F'}, - {"0ES1OF", 'F'}, - {"0ES1OS", 'F'}, - {"0ES1OV", 'F'}, {"0ES;(E", 'F'}, {"0ESB(1", 'F'}, {"0ESB(F", 'F'}, @@ -2942,6 +3166,14 @@ static const keyword_t sql_keywords[] = { {"0F()SO", 'F'}, {"0F()SU", 'F'}, {"0F()SV", 'F'}, + {"0F()T(", 'F'}, + {"0F()T1", 'F'}, + {"0F()TE", 'F'}, + {"0F()TF", 'F'}, + {"0F()TN", 'F'}, + {"0F()TS", 'F'}, + {"0F()TT", 'F'}, + {"0F()TV", 'F'}, {"0F()U", 'F'}, {"0F()U(", 'F'}, {"0F()U1", 'F'}, @@ -2950,7 +3182,6 @@ static const keyword_t sql_keywords[] = { {"0F()UE", 'F'}, {"0F()UF", 'F'}, {"0F()UK", 'F'}, - {"0F()UN", 'F'}, {"0F()UO", 'F'}, {"0F()US", 'F'}, {"0F()UT", 'F'}, @@ -2980,6 +3211,7 @@ static const keyword_t sql_keywords[] = { {"0F(1)N", 'F'}, {"0F(1)O", 'F'}, {"0F(1)S", 'F'}, + {"0F(1)T", 'F'}, {"0F(1)U", 'F'}, {"0F(1)V", 'F'}, {"0F(1,(", 'F'}, @@ -3003,12 +3235,14 @@ static const keyword_t sql_keywords[] = { {"0F(EK1", 'F'}, {"0F(EKF", 'F'}, {"0F(EKN", 'F'}, + {"0F(EKO", 'F'}, {"0F(EKS", 'F'}, {"0F(EKV", 'F'}, {"0F(EN&", 'F'}, {"0F(EN)", 'F'}, {"0F(ENK", 'F'}, {"0F(ENO", 'F'}, + {"0F(EOK", 'F'}, {"0F(ES&", 'F'}, {"0F(ES)", 'F'}, {"0F(ESK", 'F'}, @@ -3047,6 +3281,7 @@ static const keyword_t sql_keywords[] = { {"0F(N)N", 'F'}, {"0F(N)O", 'F'}, {"0F(N)S", 'F'}, + {"0F(N)T", 'F'}, {"0F(N)U", 'F'}, {"0F(N)V", 'F'}, {"0F(N,(", 'F'}, @@ -3075,6 +3310,7 @@ static const keyword_t sql_keywords[] = { {"0F(S)N", 'F'}, {"0F(S)O", 'F'}, {"0F(S)S", 'F'}, + {"0F(S)T", 'F'}, {"0F(S)U", 'F'}, {"0F(S)V", 'F'}, {"0F(S,(", 'F'}, @@ -3107,6 +3343,7 @@ static const keyword_t sql_keywords[] = { {"0F(V)N", 'F'}, {"0F(V)O", 'F'}, {"0F(V)S", 'F'}, + {"0F(V)T", 'F'}, {"0F(V)U", 'F'}, {"0F(V)V", 'F'}, {"0F(V,(", 'F'}, @@ -3395,7 +3632,6 
@@ static const keyword_t sql_keywords[] = { {"0N&1KV", 'F'}, {"0N&1O(", 'F'}, {"0N&1OF", 'F'}, - {"0N&1OO", 'F'}, {"0N&1OS", 'F'}, {"0N&1OV", 'F'}, {"0N&1TN", 'F'}, @@ -3481,7 +3717,6 @@ static const keyword_t sql_keywords[] = { {"0N&NB(", 'F'}, {"0N&NB1", 'F'}, {"0N&NBF", 'F'}, - {"0N&NBN", 'F'}, {"0N&NBS", 'F'}, {"0N&NBV", 'F'}, {"0N&NF(", 'F'}, @@ -3512,7 +3747,6 @@ static const keyword_t sql_keywords[] = { {"0N&S1", 'F'}, {"0N&S1;", 'F'}, {"0N&S1C", 'F'}, - {"0N&S1O", 'F'}, {"0N&S;", 'F'}, {"0N&S;C", 'F'}, {"0N&S;E", 'F'}, @@ -3537,7 +3771,6 @@ static const keyword_t sql_keywords[] = { {"0N&SO1", 'F'}, {"0N&SOF", 'F'}, {"0N&SON", 'F'}, - {"0N&SOO", 'F'}, {"0N&SOS", 'F'}, {"0N&SOV", 'F'}, {"0N&STN", 'F'}, @@ -3583,7 +3816,6 @@ static const keyword_t sql_keywords[] = { {"0N&VKV", 'F'}, {"0N&VO(", 'F'}, {"0N&VOF", 'F'}, - {"0N&VOO", 'F'}, {"0N&VOS", 'F'}, {"0N&VS", 'F'}, {"0N&VS;", 'F'}, @@ -3595,48 +3827,6 @@ static const keyword_t sql_keywords[] = { {"0N&VU;", 'F'}, {"0N&VUC", 'F'}, {"0N&VUE", 'F'}, - {"0N(1)F", 'F'}, - {"0N(1)O", 'F'}, - {"0N(1)U", 'F'}, - {"0N(1)V", 'F'}, - {"0N(1O(", 'F'}, - {"0N(1OF", 'F'}, - {"0N(1OS", 'F'}, - {"0N(1OV", 'F'}, - {"0N(EF(", 'F'}, - {"0N(EKF", 'F'}, - {"0N(EKN", 'F'}, - {"0N(ENK", 'F'}, - {"0N(F()", 'F'}, - {"0N(F(1", 'F'}, - {"0N(F(F", 'F'}, - {"0N(F(N", 'F'}, - {"0N(F(S", 'F'}, - {"0N(F(V", 'F'}, - {"0N(S)1", 'F'}, - {"0N(S)F", 'F'}, - {"0N(S)N", 'F'}, - {"0N(S)O", 'F'}, - {"0N(S)S", 'F'}, - {"0N(S)U", 'F'}, - {"0N(S)V", 'F'}, - {"0N(SO(", 'F'}, - {"0N(SO1", 'F'}, - {"0N(SOF", 'F'}, - {"0N(SON", 'F'}, - {"0N(SOS", 'F'}, - {"0N(SOV", 'F'}, - {"0N(U(E", 'F'}, - {"0N(V)1", 'F'}, - {"0N(V)F", 'F'}, - {"0N(V)N", 'F'}, - {"0N(V)O", 'F'}, - {"0N(V)S", 'F'}, - {"0N(V)U", 'F'}, - {"0N(V)V", 'F'}, - {"0N(VO(", 'F'}, - {"0N(VOF", 'F'}, - {"0N(VOS", 'F'}, {"0N)&(1", 'F'}, {"0N)&(E", 'F'}, {"0N)&(F", 'F'}, @@ -3756,6 +3946,7 @@ static const keyword_t sql_keywords[] = { {"0N)ESO", 'F'}, {"0N)EVC", 'F'}, {"0N)EVO", 'F'}, + {"0N)F(F", 
'F'}, {"0N)K(1", 'F'}, {"0N)K(F", 'F'}, {"0N)K(N", 'F'}, @@ -3807,7 +3998,6 @@ static const keyword_t sql_keywords[] = { {"0N)O1K", 'F'}, {"0N)O1U", 'F'}, {"0N)OF(", 'F'}, - {"0N)ON", 'F'}, {"0N)ON&", 'F'}, {"0N)ON)", 'F'}, {"0N)ON;", 'F'}, @@ -3861,12 +4051,6 @@ static const keyword_t sql_keywords[] = { {"0N,F(N", 'F'}, {"0N,F(S", 'F'}, {"0N,F(V", 'F'}, - {"0N1F()", 'F'}, - {"0N1F(1", 'F'}, - {"0N1F(F", 'F'}, - {"0N1F(N", 'F'}, - {"0N1F(S", 'F'}, - {"0N1F(V", 'F'}, {"0N1O(1", 'F'}, {"0N1O(F", 'F'}, {"0N1O(N", 'F'}, @@ -3942,6 +4126,7 @@ static const keyword_t sql_keywords[] = { {"0N;T(N", 'F'}, {"0N;T(S", 'F'}, {"0N;T(V", 'F'}, + {"0N;T1(", 'F'}, {"0N;T1,", 'F'}, {"0N;T1;", 'F'}, {"0N;T1C", 'F'}, @@ -3956,7 +4141,6 @@ static const keyword_t sql_keywords[] = { {"0N;TK1", 'F'}, {"0N;TKF", 'F'}, {"0N;TKK", 'F'}, - {"0N;TKN", 'F'}, {"0N;TKO", 'F'}, {"0N;TKS", 'F'}, {"0N;TKV", 'F'}, @@ -3974,6 +4158,7 @@ static const keyword_t sql_keywords[] = { {"0N;TNT", 'F'}, {"0N;TNV", 'F'}, {"0N;TO(", 'F'}, + {"0N;TS(", 'F'}, {"0N;TS,", 'F'}, {"0N;TS;", 'F'}, {"0N;TSC", 'F'}, @@ -3981,12 +4166,8 @@ static const keyword_t sql_keywords[] = { {"0N;TSK", 'F'}, {"0N;TSO", 'F'}, {"0N;TST", 'F'}, - {"0N;TT(", 'F'}, - {"0N;TT1", 'F'}, - {"0N;TTF", 'F'}, {"0N;TTN", 'F'}, - {"0N;TTS", 'F'}, - {"0N;TTV", 'F'}, + {"0N;TV(", 'F'}, {"0N;TV,", 'F'}, {"0N;TV;", 'F'}, {"0N;TVC", 'F'}, @@ -4028,7 +4209,6 @@ static const keyword_t sql_keywords[] = { {"0NB(1)", 'F'}, {"0NB(1O", 'F'}, {"0NB(F(", 'F'}, - {"0NB(N)", 'F'}, {"0NB(NO", 'F'}, {"0NB(S)", 'F'}, {"0NB(SO", 'F'}, @@ -4176,11 +4356,18 @@ static const keyword_t sql_keywords[] = { {"0NE(SO", 'F'}, {"0NE(V)", 'F'}, {"0NE(VO", 'F'}, + {"0NE1;T", 'F'}, {"0NE1C", 'F'}, {"0NE1O(", 'F'}, {"0NE1OF", 'F'}, {"0NE1OS", 'F'}, {"0NE1OV", 'F'}, + {"0NE1T(", 'F'}, + {"0NE1T1", 'F'}, + {"0NE1TF", 'F'}, + {"0NE1TN", 'F'}, + {"0NE1TS", 'F'}, + {"0NE1TV", 'F'}, {"0NE1UE", 'F'}, {"0NEF()", 'F'}, {"0NEF(1", 'F'}, @@ -4188,13 +4375,20 @@ static const keyword_t 
sql_keywords[] = { {"0NEF(N", 'F'}, {"0NEF(S", 'F'}, {"0NEF(V", 'F'}, - {"0NENC", 'F'}, + {"0NEN;T", 'F'}, {"0NENO(", 'F'}, {"0NENOF", 'F'}, {"0NENOS", 'F'}, {"0NENOV", 'F'}, + {"0NENT(", 'F'}, + {"0NENT1", 'F'}, + {"0NENTF", 'F'}, + {"0NENTN", 'F'}, + {"0NENTS", 'F'}, + {"0NENTV", 'F'}, {"0NENUE", 'F'}, {"0NEOKN", 'F'}, + {"0NES;T", 'F'}, {"0NESC", 'F'}, {"0NESO(", 'F'}, {"0NESO1", 'F'}, @@ -4202,6 +4396,12 @@ static const keyword_t sql_keywords[] = { {"0NESON", 'F'}, {"0NESOS", 'F'}, {"0NESOV", 'F'}, + {"0NEST(", 'F'}, + {"0NEST1", 'F'}, + {"0NESTF", 'F'}, + {"0NESTN", 'F'}, + {"0NESTS", 'F'}, + {"0NESTV", 'F'}, {"0NESUE", 'F'}, {"0NEU(1", 'F'}, {"0NEU(F", 'F'}, @@ -4214,19 +4414,23 @@ static const keyword_t sql_keywords[] = { {"0NEUEF", 'F'}, {"0NEUEK", 'F'}, {"0NEUF(", 'F'}, - {"0NEUN,", 'F'}, - {"0NEUNC", 'F'}, - {"0NEUNO", 'F'}, {"0NEUS,", 'F'}, {"0NEUSC", 'F'}, {"0NEUSO", 'F'}, {"0NEUV,", 'F'}, {"0NEUVC", 'F'}, {"0NEUVO", 'F'}, + {"0NEV;T", 'F'}, {"0NEVC", 'F'}, {"0NEVO(", 'F'}, {"0NEVOF", 'F'}, {"0NEVOS", 'F'}, + {"0NEVT(", 'F'}, + {"0NEVT1", 'F'}, + {"0NEVTF", 'F'}, + {"0NEVTN", 'F'}, + {"0NEVTS", 'F'}, + {"0NEVTV", 'F'}, {"0NEVUE", 'F'}, {"0NF()1", 'F'}, {"0NF()F", 'F'}, @@ -4237,7 +4441,6 @@ static const keyword_t sql_keywords[] = { {"0NF()U", 'F'}, {"0NF()V", 'F'}, {"0NF(1)", 'F'}, - {"0NF(1N", 'F'}, {"0NF(1O", 'F'}, {"0NF(E(", 'F'}, {"0NF(E1", 'F'}, @@ -4247,7 +4450,6 @@ static const keyword_t sql_keywords[] = { {"0NF(ES", 'F'}, {"0NF(EV", 'F'}, {"0NF(F(", 'F'}, - {"0NF(N)", 'F'}, {"0NF(N,", 'F'}, {"0NF(NO", 'F'}, {"0NF(S)", 'F'}, @@ -4257,7 +4459,6 @@ static const keyword_t sql_keywords[] = { {"0NK(1)", 'F'}, {"0NK(1O", 'F'}, {"0NK(F(", 'F'}, - {"0NK(N)", 'F'}, {"0NK(NO", 'F'}, {"0NK(S)", 'F'}, {"0NK(SO", 'F'}, @@ -4284,6 +4485,8 @@ static const keyword_t sql_keywords[] = { {"0NK)EN", 'F'}, {"0NK)ES", 'F'}, {"0NK)EV", 'F'}, + {"0NK)F(", 'F'}, + {"0NK)O(", 'F'}, {"0NK)OF", 'F'}, {"0NK)UE", 'F'}, {"0NK1", 'F'}, @@ -4337,7 +4540,6 @@ static const 
keyword_t sql_keywords[] = { {"0NKNBN", 'F'}, {"0NKNBS", 'F'}, {"0NKNBV", 'F'}, - {"0NKNC", 'F'}, {"0NKNE(", 'F'}, {"0NKNE1", 'F'}, {"0NKNEF", 'F'}, @@ -4425,6 +4627,7 @@ static const keyword_t sql_keywords[] = { {"0NO(EF", 'F'}, {"0NO(EK", 'F'}, {"0NO(EN", 'F'}, + {"0NO(EO", 'F'}, {"0NO(ES", 'F'}, {"0NO(EV", 'F'}, {"0NO(F(", 'F'}, @@ -4448,11 +4651,6 @@ static const keyword_t sql_keywords[] = { {"0NOF(S", 'F'}, {"0NOF(V", 'F'}, {"0NOK&(", 'F'}, - {"0NOK&1", 'F'}, - {"0NOK&F", 'F'}, - {"0NOK&N", 'F'}, - {"0NOK&S", 'F'}, - {"0NOK&V", 'F'}, {"0NOK(1", 'F'}, {"0NOK(F", 'F'}, {"0NOK(N", 'F'}, @@ -4490,6 +4688,7 @@ static const keyword_t sql_keywords[] = { {"0NOS)B", 'F'}, {"0NOS)C", 'F'}, {"0NOS)E", 'F'}, + {"0NOS)F", 'F'}, {"0NOS)K", 'F'}, {"0NOS)O", 'F'}, {"0NOS)U", 'F'}, @@ -4498,7 +4697,6 @@ static const keyword_t sql_keywords[] = { {"0NOS1(", 'F'}, {"0NOS1F", 'F'}, {"0NOS1N", 'F'}, - {"0NOS1O", 'F'}, {"0NOS1S", 'F'}, {"0NOS1U", 'F'}, {"0NOS1V", 'F'}, @@ -4538,6 +4736,14 @@ static const keyword_t sql_keywords[] = { {"0NOSKS", 'F'}, {"0NOSKU", 'F'}, {"0NOSKV", 'F'}, + {"0NOST(", 'F'}, + {"0NOST1", 'F'}, + {"0NOSTE", 'F'}, + {"0NOSTF", 'F'}, + {"0NOSTN", 'F'}, + {"0NOSTS", 'F'}, + {"0NOSTT", 'F'}, + {"0NOSTV", 'F'}, {"0NOSU", 'F'}, {"0NOSU(", 'F'}, {"0NOSU1", 'F'}, @@ -4546,7 +4752,6 @@ static const keyword_t sql_keywords[] = { {"0NOSUE", 'F'}, {"0NOSUF", 'F'}, {"0NOSUK", 'F'}, - {"0NOSUN", 'F'}, {"0NOSUO", 'F'}, {"0NOSUS", 'F'}, {"0NOSUT", 'F'}, @@ -4576,6 +4781,7 @@ static const keyword_t sql_keywords[] = { {"0NOV)B", 'F'}, {"0NOV)C", 'F'}, {"0NOV)E", 'F'}, + {"0NOV)F", 'F'}, {"0NOV)K", 'F'}, {"0NOV)O", 'F'}, {"0NOV)U", 'F'}, @@ -4629,6 +4835,14 @@ static const keyword_t sql_keywords[] = { {"0NOVSO", 'F'}, {"0NOVSU", 'F'}, {"0NOVSV", 'F'}, + {"0NOVT(", 'F'}, + {"0NOVT1", 'F'}, + {"0NOVTE", 'F'}, + {"0NOVTF", 'F'}, + {"0NOVTN", 'F'}, + {"0NOVTS", 'F'}, + {"0NOVTT", 'F'}, + {"0NOVTV", 'F'}, {"0NOVU", 'F'}, {"0NOVU(", 'F'}, {"0NOVU1", 'F'}, @@ -4637,7 +4851,6 @@ 
static const keyword_t sql_keywords[] = { {"0NOVUE", 'F'}, {"0NOVUF", 'F'}, {"0NOVUK", 'F'}, - {"0NOVUN", 'F'}, {"0NOVUO", 'F'}, {"0NOVUS", 'F'}, {"0NOVUT", 'F'}, @@ -4650,6 +4863,96 @@ static const keyword_t sql_keywords[] = { {"0NSUE;", 'F'}, {"0NSUEC", 'F'}, {"0NSUEK", 'F'}, + {"0NT(1)", 'F'}, + {"0NT(1O", 'F'}, + {"0NT(F(", 'F'}, + {"0NT(N)", 'F'}, + {"0NT(NO", 'F'}, + {"0NT(S)", 'F'}, + {"0NT(SO", 'F'}, + {"0NT(V)", 'F'}, + {"0NT(VO", 'F'}, + {"0NT1(F", 'F'}, + {"0NT1O(", 'F'}, + {"0NT1OF", 'F'}, + {"0NT1OS", 'F'}, + {"0NT1OV", 'F'}, + {"0NTE(1", 'F'}, + {"0NTE(F", 'F'}, + {"0NTE(N", 'F'}, + {"0NTE(S", 'F'}, + {"0NTE(V", 'F'}, + {"0NTE1N", 'F'}, + {"0NTE1O", 'F'}, + {"0NTEF(", 'F'}, + {"0NTEK(", 'F'}, + {"0NTEK1", 'F'}, + {"0NTEKF", 'F'}, + {"0NTEKN", 'F'}, + {"0NTEKS", 'F'}, + {"0NTEKV", 'F'}, + {"0NTENN", 'F'}, + {"0NTENO", 'F'}, + {"0NTESN", 'F'}, + {"0NTESO", 'F'}, + {"0NTEVN", 'F'}, + {"0NTEVO", 'F'}, + {"0NTF()", 'F'}, + {"0NTF(1", 'F'}, + {"0NTF(F", 'F'}, + {"0NTF(N", 'F'}, + {"0NTF(S", 'F'}, + {"0NTF(V", 'F'}, + {"0NTN(1", 'F'}, + {"0NTN(F", 'F'}, + {"0NTN(S", 'F'}, + {"0NTN(V", 'F'}, + {"0NTN1C", 'F'}, + {"0NTN1O", 'F'}, + {"0NTN;E", 'F'}, + {"0NTN;N", 'F'}, + {"0NTN;T", 'F'}, + {"0NTNE(", 'F'}, + {"0NTNE1", 'F'}, + {"0NTNEF", 'F'}, + {"0NTNEN", 'F'}, + {"0NTNES", 'F'}, + {"0NTNEV", 'F'}, + {"0NTNF(", 'F'}, + {"0NTNKN", 'F'}, + {"0NTNN:", 'F'}, + {"0NTNNC", 'F'}, + {"0NTNNO", 'F'}, + {"0NTNO(", 'F'}, + {"0NTNOF", 'F'}, + {"0NTNOS", 'F'}, + {"0NTNOV", 'F'}, + {"0NTNSC", 'F'}, + {"0NTNSO", 'F'}, + {"0NTNT(", 'F'}, + {"0NTNT1", 'F'}, + {"0NTNTF", 'F'}, + {"0NTNTN", 'F'}, + {"0NTNTS", 'F'}, + {"0NTNTV", 'F'}, + {"0NTNVC", 'F'}, + {"0NTNVO", 'F'}, + {"0NTS(F", 'F'}, + {"0NTSO(", 'F'}, + {"0NTSO1", 'F'}, + {"0NTSOF", 'F'}, + {"0NTSON", 'F'}, + {"0NTSOS", 'F'}, + {"0NTSOV", 'F'}, + {"0NTTNE", 'F'}, + {"0NTTNK", 'F'}, + {"0NTTNN", 'F'}, + {"0NTTNT", 'F'}, + {"0NTV(1", 'F'}, + {"0NTV(F", 'F'}, + {"0NTVO(", 'F'}, + {"0NTVOF", 'F'}, + {"0NTVOS", 'F'}, {"0NU(1)", 
'F'}, {"0NU(1O", 'F'}, {"0NU(E(", 'F'}, @@ -4733,7 +5036,6 @@ static const keyword_t sql_keywords[] = { {"0NUENU", 'F'}, {"0NUEOK", 'F'}, {"0NUEON", 'F'}, - {"0NUEOO", 'F'}, {"0NUES", 'F'}, {"0NUES&", 'F'}, {"0NUES(", 'F'}, @@ -4769,30 +5071,6 @@ static const keyword_t sql_keywords[] = { {"0NUF(S", 'F'}, {"0NUF(V", 'F'}, {"0NUK(E", 'F'}, - {"0NUN(1", 'F'}, - {"0NUN(F", 'F'}, - {"0NUN(S", 'F'}, - {"0NUN(V", 'F'}, - {"0NUN,(", 'F'}, - {"0NUN,F", 'F'}, - {"0NUN1(", 'F'}, - {"0NUN1,", 'F'}, - {"0NUN1O", 'F'}, - {"0NUNC", 'F'}, - {"0NUNE(", 'F'}, - {"0NUNE1", 'F'}, - {"0NUNEF", 'F'}, - {"0NUNEN", 'F'}, - {"0NUNES", 'F'}, - {"0NUNEV", 'F'}, - {"0NUNF(", 'F'}, - {"0NUNO(", 'F'}, - {"0NUNOF", 'F'}, - {"0NUNOS", 'F'}, - {"0NUNOV", 'F'}, - {"0NUNS(", 'F'}, - {"0NUNS,", 'F'}, - {"0NUNSO", 'F'}, {"0NUO(E", 'F'}, {"0NUON(", 'F'}, {"0NUON1", 'F'}, @@ -4810,110 +5088,15 @@ static const keyword_t sql_keywords[] = { {"0NUTN(", 'F'}, {"0NUTN1", 'F'}, {"0NUTNF", 'F'}, + {"0NUTNN", 'F'}, {"0NUTNS", 'F'}, + {"0NUTNV", 'F'}, {"0NUV,(", 'F'}, {"0NUV,F", 'F'}, {"0NUVC", 'F'}, {"0NUVO(", 'F'}, {"0NUVOF", 'F'}, {"0NUVOS", 'F'}, - {"0O(1)O", 'F'}, - {"0O(1)U", 'F'}, - {"0O(1O(", 'F'}, - {"0O(1OF", 'F'}, - {"0O(1OS", 'F'}, - {"0O(1OV", 'F'}, - {"0O(F()", 'F'}, - {"0O(F(1", 'F'}, - {"0O(F(F", 'F'}, - {"0O(F(N", 'F'}, - {"0O(F(S", 'F'}, - {"0O(F(V", 'F'}, - {"0O(N)O", 'F'}, - {"0O(N)U", 'F'}, - {"0O(NO(", 'F'}, - {"0O(NOF", 'F'}, - {"0O(NOS", 'F'}, - {"0O(NOV", 'F'}, - {"0O(S)O", 'F'}, - {"0O(S)U", 'F'}, - {"0O(SO(", 'F'}, - {"0O(SO1", 'F'}, - {"0O(SOF", 'F'}, - {"0O(SON", 'F'}, - {"0O(SOS", 'F'}, - {"0O(SOV", 'F'}, - {"0O(V)O", 'F'}, - {"0O(V)U", 'F'}, - {"0O(VO(", 'F'}, - {"0O(VOF", 'F'}, - {"0O(VOS", 'F'}, - {"0O1UE(", 'F'}, - {"0O1UE1", 'F'}, - {"0O1UEF", 'F'}, - {"0O1UEK", 'F'}, - {"0O1UEN", 'F'}, - {"0O1UES", 'F'}, - {"0O1UEV", 'F'}, - {"0OF()O", 'F'}, - {"0OF()U", 'F'}, - {"0OF(1)", 'F'}, - {"0OF(1O", 'F'}, - {"0OF(F(", 'F'}, - {"0OF(N)", 'F'}, - {"0OF(NO", 'F'}, - {"0OF(S)", 'F'}, - 
{"0OF(SO", 'F'}, - {"0OF(V)", 'F'}, - {"0OF(VO", 'F'}, - {"0ONUE(", 'F'}, - {"0ONUE1", 'F'}, - {"0ONUEF", 'F'}, - {"0ONUEK", 'F'}, - {"0ONUEN", 'F'}, - {"0ONUES", 'F'}, - {"0ONUEV", 'F'}, - {"0OSUE(", 'F'}, - {"0OSUE1", 'F'}, - {"0OSUEF", 'F'}, - {"0OSUEK", 'F'}, - {"0OSUEN", 'F'}, - {"0OSUES", 'F'}, - {"0OSUEV", 'F'}, - {"0OUE(1", 'F'}, - {"0OUE(F", 'F'}, - {"0OUE(N", 'F'}, - {"0OUE(S", 'F'}, - {"0OUE(V", 'F'}, - {"0OUE1,", 'F'}, - {"0OUE1O", 'F'}, - {"0OUEF(", 'F'}, - {"0OUEK(", 'F'}, - {"0OUEK1", 'F'}, - {"0OUEKF", 'F'}, - {"0OUEKN", 'F'}, - {"0OUEKS", 'F'}, - {"0OUEKV", 'F'}, - {"0OUEN,", 'F'}, - {"0OUENO", 'F'}, - {"0OUES,", 'F'}, - {"0OUESO", 'F'}, - {"0OUEV,", 'F'}, - {"0OUEVO", 'F'}, - {"0OVO(1", 'F'}, - {"0OVO(F", 'F'}, - {"0OVO(N", 'F'}, - {"0OVO(S", 'F'}, - {"0OVO(V", 'F'}, - {"0OVOF(", 'F'}, - {"0OVOSU", 'F'}, - {"0OVUE(", 'F'}, - {"0OVUE1", 'F'}, - {"0OVUEF", 'F'}, - {"0OVUEK", 'F'}, - {"0OVUEN", 'F'}, - {"0OVUES", 'F'}, - {"0OVUEV", 'F'}, {"0S&(1&", 'F'}, {"0S&(1)", 'F'}, {"0S&(1,", 'F'}, @@ -4972,7 +5155,6 @@ static const keyword_t sql_keywords[] = { {"0S&1KV", 'F'}, {"0S&1O(", 'F'}, {"0S&1OF", 'F'}, - {"0S&1OO", 'F'}, {"0S&1OS", 'F'}, {"0S&1OV", 'F'}, {"0S&1TN", 'F'}, @@ -5098,7 +5280,6 @@ static const keyword_t sql_keywords[] = { {"0S&S1", 'F'}, {"0S&S1;", 'F'}, {"0S&S1C", 'F'}, - {"0S&S1O", 'F'}, {"0S&S;", 'F'}, {"0S&S;C", 'F'}, {"0S&S;E", 'F'}, @@ -5123,7 +5304,6 @@ static const keyword_t sql_keywords[] = { {"0S&SO1", 'F'}, {"0S&SOF", 'F'}, {"0S&SON", 'F'}, - {"0S&SOO", 'F'}, {"0S&SOS", 'F'}, {"0S&SOV", 'F'}, {"0S&STN", 'F'}, @@ -5169,7 +5349,6 @@ static const keyword_t sql_keywords[] = { {"0S&VKV", 'F'}, {"0S&VO(", 'F'}, {"0S&VOF", 'F'}, - {"0S&VOO", 'F'}, {"0S&VOS", 'F'}, {"0S&VS", 'F'}, {"0S&VS;", 'F'}, @@ -5306,6 +5485,7 @@ static const keyword_t sql_keywords[] = { {"0S)ESO", 'F'}, {"0S)EVC", 'F'}, {"0S)EVO", 'F'}, + {"0S)F(F", 'F'}, {"0S)K(1", 'F'}, {"0S)K(F", 'F'}, {"0S)K(N", 'F'}, @@ -5419,22 +5599,6 @@ static const keyword_t 
sql_keywords[] = { {"0S1F(S", 'F'}, {"0S1F(V", 'F'}, {"0S1NC", 'F'}, - {"0S1O(1", 'F'}, - {"0S1O(F", 'F'}, - {"0S1O(N", 'F'}, - {"0S1O(S", 'F'}, - {"0S1O(V", 'F'}, - {"0S1OF(", 'F'}, - {"0S1OS(", 'F'}, - {"0S1OS1", 'F'}, - {"0S1OSF", 'F'}, - {"0S1OSU", 'F'}, - {"0S1OSV", 'F'}, - {"0S1OV(", 'F'}, - {"0S1OVF", 'F'}, - {"0S1OVO", 'F'}, - {"0S1OVS", 'F'}, - {"0S1OVU", 'F'}, {"0S1S;", 'F'}, {"0S1S;C", 'F'}, {"0S1SC", 'F'}, @@ -5495,6 +5659,7 @@ static const keyword_t sql_keywords[] = { {"0S;T(N", 'F'}, {"0S;T(S", 'F'}, {"0S;T(V", 'F'}, + {"0S;T1(", 'F'}, {"0S;T1,", 'F'}, {"0S;T1;", 'F'}, {"0S;T1C", 'F'}, @@ -5527,6 +5692,7 @@ static const keyword_t sql_keywords[] = { {"0S;TNT", 'F'}, {"0S;TNV", 'F'}, {"0S;TO(", 'F'}, + {"0S;TS(", 'F'}, {"0S;TS,", 'F'}, {"0S;TS;", 'F'}, {"0S;TSC", 'F'}, @@ -5534,12 +5700,8 @@ static const keyword_t sql_keywords[] = { {"0S;TSK", 'F'}, {"0S;TSO", 'F'}, {"0S;TST", 'F'}, - {"0S;TT(", 'F'}, - {"0S;TT1", 'F'}, - {"0S;TTF", 'F'}, {"0S;TTN", 'F'}, - {"0S;TTS", 'F'}, - {"0S;TTV", 'F'}, + {"0S;TV(", 'F'}, {"0S;TV,", 'F'}, {"0S;TV;", 'F'}, {"0S;TVC", 'F'}, @@ -5581,7 +5743,6 @@ static const keyword_t sql_keywords[] = { {"0SB(1)", 'F'}, {"0SB(1O", 'F'}, {"0SB(F(", 'F'}, - {"0SB(N)", 'F'}, {"0SB(NO", 'F'}, {"0SB(S)", 'F'}, {"0SB(SO", 'F'}, @@ -5730,11 +5891,18 @@ static const keyword_t sql_keywords[] = { {"0SE(SO", 'F'}, {"0SE(V)", 'F'}, {"0SE(VO", 'F'}, + {"0SE1;T", 'F'}, {"0SE1C", 'F'}, {"0SE1O(", 'F'}, {"0SE1OF", 'F'}, {"0SE1OS", 'F'}, {"0SE1OV", 'F'}, + {"0SE1T(", 'F'}, + {"0SE1T1", 'F'}, + {"0SE1TF", 'F'}, + {"0SE1TN", 'F'}, + {"0SE1TS", 'F'}, + {"0SE1TV", 'F'}, {"0SE1UE", 'F'}, {"0SEF()", 'F'}, {"0SEF(1", 'F'}, @@ -5748,35 +5916,50 @@ static const keyword_t sql_keywords[] = { {"0SEK(N", 'F'}, {"0SEK(S", 'F'}, {"0SEK(V", 'F'}, + {"0SEK1;", 'F'}, {"0SEK1C", 'F'}, {"0SEK1O", 'F'}, + {"0SEK1T", 'F'}, {"0SEK1U", 'F'}, {"0SEKF(", 'F'}, + {"0SEKN;", 'F'}, {"0SEKNC", 'F'}, {"0SEKNE", 'F'}, + {"0SEKNT", 'F'}, {"0SEKNU", 'F'}, {"0SEKOK", 'F'}, + 
{"0SEKS;", 'F'}, {"0SEKSC", 'F'}, {"0SEKSO", 'F'}, + {"0SEKST", 'F'}, {"0SEKSU", 'F'}, {"0SEKU(", 'F'}, {"0SEKU1", 'F'}, {"0SEKUE", 'F'}, {"0SEKUF", 'F'}, - {"0SEKUN", 'F'}, {"0SEKUS", 'F'}, {"0SEKUV", 'F'}, + {"0SEKV;", 'F'}, {"0SEKVC", 'F'}, {"0SEKVO", 'F'}, + {"0SEKVT", 'F'}, {"0SEKVU", 'F'}, + {"0SEN;T", 'F'}, {"0SENC", 'F'}, {"0SENEN", 'F'}, {"0SENO(", 'F'}, {"0SENOF", 'F'}, {"0SENOS", 'F'}, {"0SENOV", 'F'}, + {"0SENT(", 'F'}, + {"0SENT1", 'F'}, + {"0SENTF", 'F'}, + {"0SENTN", 'F'}, + {"0SENTS", 'F'}, + {"0SENTV", 'F'}, {"0SENUE", 'F'}, {"0SEOKN", 'F'}, + {"0SES;T", 'F'}, {"0SESC", 'F'}, {"0SESO(", 'F'}, {"0SESO1", 'F'}, @@ -5784,6 +5967,12 @@ static const keyword_t sql_keywords[] = { {"0SESON", 'F'}, {"0SESOS", 'F'}, {"0SESOV", 'F'}, + {"0SEST(", 'F'}, + {"0SEST1", 'F'}, + {"0SESTF", 'F'}, + {"0SESTN", 'F'}, + {"0SESTS", 'F'}, + {"0SESTV", 'F'}, {"0SESUE", 'F'}, {"0SEU(1", 'F'}, {"0SEU(F", 'F'}, @@ -5796,19 +5985,23 @@ static const keyword_t sql_keywords[] = { {"0SEUEF", 'F'}, {"0SEUEK", 'F'}, {"0SEUF(", 'F'}, - {"0SEUN,", 'F'}, - {"0SEUNC", 'F'}, - {"0SEUNO", 'F'}, {"0SEUS,", 'F'}, {"0SEUSC", 'F'}, {"0SEUSO", 'F'}, {"0SEUV,", 'F'}, {"0SEUVC", 'F'}, {"0SEUVO", 'F'}, + {"0SEV;T", 'F'}, {"0SEVC", 'F'}, {"0SEVO(", 'F'}, {"0SEVOF", 'F'}, {"0SEVOS", 'F'}, + {"0SEVT(", 'F'}, + {"0SEVT1", 'F'}, + {"0SEVTF", 'F'}, + {"0SEVTN", 'F'}, + {"0SEVTS", 'F'}, + {"0SEVTV", 'F'}, {"0SEVUE", 'F'}, {"0SF()1", 'F'}, {"0SF()F", 'F'}, @@ -5866,6 +6059,8 @@ static const keyword_t sql_keywords[] = { {"0SK)EN", 'F'}, {"0SK)ES", 'F'}, {"0SK)EV", 'F'}, + {"0SK)F(", 'F'}, + {"0SK)O(", 'F'}, {"0SK)OF", 'F'}, {"0SK)UE", 'F'}, {"0SK1", 'F'}, @@ -6011,6 +6206,7 @@ static const keyword_t sql_keywords[] = { {"0SO(EF", 'F'}, {"0SO(EK", 'F'}, {"0SO(EN", 'F'}, + {"0SO(EO", 'F'}, {"0SO(ES", 'F'}, {"0SO(EV", 'F'}, {"0SO(F(", 'F'}, @@ -6043,6 +6239,7 @@ static const keyword_t sql_keywords[] = { {"0SO1)B", 'F'}, {"0SO1)C", 'F'}, {"0SO1)E", 'F'}, + {"0SO1)F", 'F'}, {"0SO1)K", 'F'}, {"0SO1)O", 'F'}, 
{"0SO1)U", 'F'}, @@ -6089,12 +6286,17 @@ static const keyword_t sql_keywords[] = { {"0SO1N(", 'F'}, {"0SO1N,", 'F'}, {"0SO1NE", 'F'}, - {"0SO1NF", 'F'}, {"0SO1NU", 'F'}, - {"0SO1S(", 'F'}, - {"0SO1SF", 'F'}, {"0SO1SU", 'F'}, {"0SO1SV", 'F'}, + {"0SO1T(", 'F'}, + {"0SO1T1", 'F'}, + {"0SO1TE", 'F'}, + {"0SO1TF", 'F'}, + {"0SO1TN", 'F'}, + {"0SO1TS", 'F'}, + {"0SO1TT", 'F'}, + {"0SO1TV", 'F'}, {"0SO1U", 'F'}, {"0SO1U(", 'F'}, {"0SO1U1", 'F'}, @@ -6103,7 +6305,6 @@ static const keyword_t sql_keywords[] = { {"0SO1UE", 'F'}, {"0SO1UF", 'F'}, {"0SO1UK", 'F'}, - {"0SO1UN", 'F'}, {"0SO1UO", 'F'}, {"0SO1US", 'F'}, {"0SO1UT", 'F'}, @@ -6166,16 +6367,14 @@ static const keyword_t sql_keywords[] = { {"0SON)B", 'F'}, {"0SON)C", 'F'}, {"0SON)E", 'F'}, + {"0SON)F", 'F'}, {"0SON)K", 'F'}, {"0SON)O", 'F'}, {"0SON)U", 'F'}, {"0SON,(", 'F'}, {"0SON,F", 'F'}, {"0SON1(", 'F'}, - {"0SON1F", 'F'}, - {"0SON1N", 'F'}, {"0SON1O", 'F'}, - {"0SON1S", 'F'}, {"0SON1U", 'F'}, {"0SON1V", 'F'}, {"0SON;", 'F'}, @@ -6213,6 +6412,14 @@ static const keyword_t sql_keywords[] = { {"0SONKU", 'F'}, {"0SONKV", 'F'}, {"0SONSU", 'F'}, + {"0SONT(", 'F'}, + {"0SONT1", 'F'}, + {"0SONTE", 'F'}, + {"0SONTF", 'F'}, + {"0SONTN", 'F'}, + {"0SONTS", 'F'}, + {"0SONTT", 'F'}, + {"0SONTV", 'F'}, {"0SONU", 'F'}, {"0SONU(", 'F'}, {"0SONU1", 'F'}, @@ -6221,7 +6428,6 @@ static const keyword_t sql_keywords[] = { {"0SONUE", 'F'}, {"0SONUF", 'F'}, {"0SONUK", 'F'}, - {"0SONUN", 'F'}, {"0SONUO", 'F'}, {"0SONUS", 'F'}, {"0SONUT", 'F'}, @@ -6244,6 +6450,7 @@ static const keyword_t sql_keywords[] = { {"0SOS)B", 'F'}, {"0SOS)C", 'F'}, {"0SOS)E", 'F'}, + {"0SOS)F", 'F'}, {"0SOS)K", 'F'}, {"0SOS)O", 'F'}, {"0SOS)U", 'F'}, @@ -6252,7 +6459,6 @@ static const keyword_t sql_keywords[] = { {"0SOS1(", 'F'}, {"0SOS1F", 'F'}, {"0SOS1N", 'F'}, - {"0SOS1O", 'F'}, {"0SOS1S", 'F'}, {"0SOS1U", 'F'}, {"0SOS1V", 'F'}, @@ -6293,6 +6499,14 @@ static const keyword_t sql_keywords[] = { {"0SOSKS", 'F'}, {"0SOSKU", 'F'}, {"0SOSKV", 'F'}, + {"0SOST(", 'F'}, 
+ {"0SOST1", 'F'}, + {"0SOSTE", 'F'}, + {"0SOSTF", 'F'}, + {"0SOSTN", 'F'}, + {"0SOSTS", 'F'}, + {"0SOSTT", 'F'}, + {"0SOSTV", 'F'}, {"0SOSU", 'F'}, {"0SOSU(", 'F'}, {"0SOSU1", 'F'}, @@ -6301,7 +6515,6 @@ static const keyword_t sql_keywords[] = { {"0SOSUE", 'F'}, {"0SOSUF", 'F'}, {"0SOSUK", 'F'}, - {"0SOSUN", 'F'}, {"0SOSUO", 'F'}, {"0SOSUS", 'F'}, {"0SOSUT", 'F'}, @@ -6332,6 +6545,7 @@ static const keyword_t sql_keywords[] = { {"0SOV)B", 'F'}, {"0SOV)C", 'F'}, {"0SOV)E", 'F'}, + {"0SOV)F", 'F'}, {"0SOV)K", 'F'}, {"0SOV)O", 'F'}, {"0SOV)U", 'F'}, @@ -6385,6 +6599,14 @@ static const keyword_t sql_keywords[] = { {"0SOVSO", 'F'}, {"0SOVSU", 'F'}, {"0SOVSV", 'F'}, + {"0SOVT(", 'F'}, + {"0SOVT1", 'F'}, + {"0SOVTE", 'F'}, + {"0SOVTF", 'F'}, + {"0SOVTN", 'F'}, + {"0SOVTS", 'F'}, + {"0SOVTT", 'F'}, + {"0SOVTV", 'F'}, {"0SOVU", 'F'}, {"0SOVU(", 'F'}, {"0SOVU1", 'F'}, @@ -6393,11 +6615,100 @@ static const keyword_t sql_keywords[] = { {"0SOVUE", 'F'}, {"0SOVUF", 'F'}, {"0SOVUK", 'F'}, - {"0SOVUN", 'F'}, {"0SOVUO", 'F'}, {"0SOVUS", 'F'}, {"0SOVUT", 'F'}, {"0SOVUV", 'F'}, + {"0ST(1)", 'F'}, + {"0ST(1O", 'F'}, + {"0ST(F(", 'F'}, + {"0ST(N)", 'F'}, + {"0ST(NO", 'F'}, + {"0ST(S)", 'F'}, + {"0ST(SO", 'F'}, + {"0ST(V)", 'F'}, + {"0ST(VO", 'F'}, + {"0ST1(F", 'F'}, + {"0ST1O(", 'F'}, + {"0ST1OF", 'F'}, + {"0ST1OS", 'F'}, + {"0ST1OV", 'F'}, + {"0STE(1", 'F'}, + {"0STE(F", 'F'}, + {"0STE(N", 'F'}, + {"0STE(S", 'F'}, + {"0STE(V", 'F'}, + {"0STE1N", 'F'}, + {"0STE1O", 'F'}, + {"0STEF(", 'F'}, + {"0STEK(", 'F'}, + {"0STEK1", 'F'}, + {"0STEKF", 'F'}, + {"0STEKN", 'F'}, + {"0STEKS", 'F'}, + {"0STEKV", 'F'}, + {"0STENN", 'F'}, + {"0STENO", 'F'}, + {"0STESN", 'F'}, + {"0STESO", 'F'}, + {"0STEVN", 'F'}, + {"0STEVO", 'F'}, + {"0STF()", 'F'}, + {"0STF(1", 'F'}, + {"0STF(F", 'F'}, + {"0STF(N", 'F'}, + {"0STF(S", 'F'}, + {"0STF(V", 'F'}, + {"0STN(1", 'F'}, + {"0STN(F", 'F'}, + {"0STN(S", 'F'}, + {"0STN(V", 'F'}, + {"0STN1C", 'F'}, + {"0STN1O", 'F'}, + {"0STN;E", 'F'}, + {"0STN;N", 'F'}, + 
{"0STN;T", 'F'}, + {"0STNE(", 'F'}, + {"0STNE1", 'F'}, + {"0STNEF", 'F'}, + {"0STNEN", 'F'}, + {"0STNES", 'F'}, + {"0STNEV", 'F'}, + {"0STNF(", 'F'}, + {"0STNKN", 'F'}, + {"0STNN:", 'F'}, + {"0STNNC", 'F'}, + {"0STNNO", 'F'}, + {"0STNO(", 'F'}, + {"0STNOF", 'F'}, + {"0STNOS", 'F'}, + {"0STNOV", 'F'}, + {"0STNSC", 'F'}, + {"0STNSO", 'F'}, + {"0STNT(", 'F'}, + {"0STNT1", 'F'}, + {"0STNTF", 'F'}, + {"0STNTN", 'F'}, + {"0STNTS", 'F'}, + {"0STNTV", 'F'}, + {"0STNVC", 'F'}, + {"0STNVO", 'F'}, + {"0STS(F", 'F'}, + {"0STSO(", 'F'}, + {"0STSO1", 'F'}, + {"0STSOF", 'F'}, + {"0STSON", 'F'}, + {"0STSOS", 'F'}, + {"0STSOV", 'F'}, + {"0STTNE", 'F'}, + {"0STTNK", 'F'}, + {"0STTNN", 'F'}, + {"0STTNT", 'F'}, + {"0STV(1", 'F'}, + {"0STV(F", 'F'}, + {"0STVO(", 'F'}, + {"0STVOF", 'F'}, + {"0STVOS", 'F'}, {"0SU(1)", 'F'}, {"0SU(1O", 'F'}, {"0SU(E(", 'F'}, @@ -6481,7 +6792,6 @@ static const keyword_t sql_keywords[] = { {"0SUENU", 'F'}, {"0SUEOK", 'F'}, {"0SUEON", 'F'}, - {"0SUEOO", 'F'}, {"0SUES", 'F'}, {"0SUES&", 'F'}, {"0SUES(", 'F'}, @@ -6517,30 +6827,6 @@ static const keyword_t sql_keywords[] = { {"0SUF(S", 'F'}, {"0SUF(V", 'F'}, {"0SUK(E", 'F'}, - {"0SUN(1", 'F'}, - {"0SUN(F", 'F'}, - {"0SUN(S", 'F'}, - {"0SUN(V", 'F'}, - {"0SUN,(", 'F'}, - {"0SUN,F", 'F'}, - {"0SUN1(", 'F'}, - {"0SUN1,", 'F'}, - {"0SUN1O", 'F'}, - {"0SUNC", 'F'}, - {"0SUNE(", 'F'}, - {"0SUNE1", 'F'}, - {"0SUNEF", 'F'}, - {"0SUNEN", 'F'}, - {"0SUNES", 'F'}, - {"0SUNEV", 'F'}, - {"0SUNF(", 'F'}, - {"0SUNO(", 'F'}, - {"0SUNOF", 'F'}, - {"0SUNOS", 'F'}, - {"0SUNOV", 'F'}, - {"0SUNS(", 'F'}, - {"0SUNS,", 'F'}, - {"0SUNSO", 'F'}, {"0SUO(E", 'F'}, {"0SUON(", 'F'}, {"0SUON1", 'F'}, @@ -6558,7 +6844,9 @@ static const keyword_t sql_keywords[] = { {"0SUTN(", 'F'}, {"0SUTN1", 'F'}, {"0SUTNF", 'F'}, + {"0SUTNN", 'F'}, {"0SUTNS", 'F'}, + {"0SUTNV", 'F'}, {"0SUV,(", 'F'}, {"0SUV,F", 'F'}, {"0SUVC", 'F'}, @@ -6582,7 +6870,6 @@ static const keyword_t sql_keywords[] = { {"0SVOSF", 'F'}, {"0SVOSU", 'F'}, {"0SVOSV", 'F'}, - {"0SVS", 
'F'}, {"0SVS;", 'F'}, {"0SVS;C", 'F'}, {"0SVSC", 'F'}, @@ -6622,16 +6909,19 @@ static const keyword_t sql_keywords[] = { {"0T(N1)", 'F'}, {"0T(N1O", 'F'}, {"0T(NF(", 'F'}, + {"0T(NN)", 'F'}, + {"0T(NNO", 'F'}, {"0T(NO(", 'F'}, {"0T(NOF", 'F'}, {"0T(NOS", 'F'}, {"0T(NOV", 'F'}, {"0T(NS)", 'F'}, {"0T(NSO", 'F'}, + {"0T(NV)", 'F'}, + {"0T(NVO", 'F'}, {"0T(S)F", 'F'}, {"0T(S)O", 'F'}, {"0T(S1)", 'F'}, - {"0T(S1O", 'F'}, {"0T(SF(", 'F'}, {"0T(SN)", 'F'}, {"0T(SNO", 'F'}, @@ -6696,6 +6986,12 @@ static const keyword_t sql_keywords[] = { {"0TNF(N", 'F'}, {"0TNF(S", 'F'}, {"0TNF(V", 'F'}, + {"0TNN;", 'F'}, + {"0TNN;C", 'F'}, + {"0TNNO(", 'F'}, + {"0TNNOF", 'F'}, + {"0TNNOS", 'F'}, + {"0TNNOV", 'F'}, {"0TNO(1", 'F'}, {"0TNO(F", 'F'}, {"0TNO(N", 'F'}, @@ -6714,6 +7010,9 @@ static const keyword_t sql_keywords[] = { {"0TNSOS", 'F'}, {"0TNSOV", 'F'}, {"0TNV;", 'F'}, + {"0TNV;C", 'F'}, + {"0TNVO(", 'F'}, + {"0TNVOF", 'F'}, {"0TNVOS", 'F'}, {"0TSF(1", 'F'}, {"0TSF(F", 'F'}, @@ -6944,7 +7243,6 @@ static const keyword_t sql_keywords[] = { {"0V&1KV", 'F'}, {"0V&1O(", 'F'}, {"0V&1OF", 'F'}, - {"0V&1OO", 'F'}, {"0V&1OS", 'F'}, {"0V&1OV", 'F'}, {"0V&1TN", 'F'}, @@ -7070,7 +7368,6 @@ static const keyword_t sql_keywords[] = { {"0V&S1", 'F'}, {"0V&S1;", 'F'}, {"0V&S1C", 'F'}, - {"0V&S1O", 'F'}, {"0V&S;", 'F'}, {"0V&S;C", 'F'}, {"0V&S;E", 'F'}, @@ -7095,7 +7392,6 @@ static const keyword_t sql_keywords[] = { {"0V&SO1", 'F'}, {"0V&SOF", 'F'}, {"0V&SON", 'F'}, - {"0V&SOO", 'F'}, {"0V&SOS", 'F'}, {"0V&SOV", 'F'}, {"0V&STN", 'F'}, @@ -7141,7 +7437,6 @@ static const keyword_t sql_keywords[] = { {"0V&VKV", 'F'}, {"0V&VO(", 'F'}, {"0V&VOF", 'F'}, - {"0V&VOO", 'F'}, {"0V&VOS", 'F'}, {"0V&VS", 'F'}, {"0V&VS;", 'F'}, @@ -7278,6 +7573,7 @@ static const keyword_t sql_keywords[] = { {"0V)ESO", 'F'}, {"0V)EVC", 'F'}, {"0V)EVO", 'F'}, + {"0V)F(F", 'F'}, {"0V)K(1", 'F'}, {"0V)K(F", 'F'}, {"0V)K(N", 'F'}, @@ -7431,6 +7727,7 @@ static const keyword_t sql_keywords[] = { {"0V;T(N", 'F'}, {"0V;T(S", 'F'}, 
{"0V;T(V", 'F'}, + {"0V;T1(", 'F'}, {"0V;T1,", 'F'}, {"0V;T1;", 'F'}, {"0V;T1C", 'F'}, @@ -7463,6 +7760,7 @@ static const keyword_t sql_keywords[] = { {"0V;TNT", 'F'}, {"0V;TNV", 'F'}, {"0V;TO(", 'F'}, + {"0V;TS(", 'F'}, {"0V;TS,", 'F'}, {"0V;TS;", 'F'}, {"0V;TSC", 'F'}, @@ -7470,12 +7768,8 @@ static const keyword_t sql_keywords[] = { {"0V;TSK", 'F'}, {"0V;TSO", 'F'}, {"0V;TST", 'F'}, - {"0V;TT(", 'F'}, - {"0V;TT1", 'F'}, - {"0V;TTF", 'F'}, {"0V;TTN", 'F'}, - {"0V;TTS", 'F'}, - {"0V;TTV", 'F'}, + {"0V;TV(", 'F'}, {"0V;TV,", 'F'}, {"0V;TV;", 'F'}, {"0V;TVC", 'F'}, @@ -7517,7 +7811,6 @@ static const keyword_t sql_keywords[] = { {"0VB(1)", 'F'}, {"0VB(1O", 'F'}, {"0VB(F(", 'F'}, - {"0VB(N)", 'F'}, {"0VB(NO", 'F'}, {"0VB(S)", 'F'}, {"0VB(SO", 'F'}, @@ -7666,11 +7959,18 @@ static const keyword_t sql_keywords[] = { {"0VE(SO", 'F'}, {"0VE(V)", 'F'}, {"0VE(VO", 'F'}, + {"0VE1;T", 'F'}, {"0VE1C", 'F'}, {"0VE1O(", 'F'}, {"0VE1OF", 'F'}, {"0VE1OS", 'F'}, {"0VE1OV", 'F'}, + {"0VE1T(", 'F'}, + {"0VE1T1", 'F'}, + {"0VE1TF", 'F'}, + {"0VE1TN", 'F'}, + {"0VE1TS", 'F'}, + {"0VE1TV", 'F'}, {"0VE1UE", 'F'}, {"0VEF()", 'F'}, {"0VEF(1", 'F'}, @@ -7684,35 +7984,50 @@ static const keyword_t sql_keywords[] = { {"0VEK(N", 'F'}, {"0VEK(S", 'F'}, {"0VEK(V", 'F'}, + {"0VEK1;", 'F'}, {"0VEK1C", 'F'}, {"0VEK1O", 'F'}, + {"0VEK1T", 'F'}, {"0VEK1U", 'F'}, {"0VEKF(", 'F'}, + {"0VEKN;", 'F'}, {"0VEKNC", 'F'}, {"0VEKNE", 'F'}, + {"0VEKNT", 'F'}, {"0VEKNU", 'F'}, {"0VEKOK", 'F'}, + {"0VEKS;", 'F'}, {"0VEKSC", 'F'}, {"0VEKSO", 'F'}, + {"0VEKST", 'F'}, {"0VEKSU", 'F'}, {"0VEKU(", 'F'}, {"0VEKU1", 'F'}, {"0VEKUE", 'F'}, {"0VEKUF", 'F'}, - {"0VEKUN", 'F'}, {"0VEKUS", 'F'}, {"0VEKUV", 'F'}, + {"0VEKV;", 'F'}, {"0VEKVC", 'F'}, {"0VEKVO", 'F'}, + {"0VEKVT", 'F'}, {"0VEKVU", 'F'}, + {"0VEN;T", 'F'}, {"0VENC", 'F'}, {"0VENEN", 'F'}, {"0VENO(", 'F'}, {"0VENOF", 'F'}, {"0VENOS", 'F'}, {"0VENOV", 'F'}, + {"0VENT(", 'F'}, + {"0VENT1", 'F'}, + {"0VENTF", 'F'}, + {"0VENTN", 'F'}, + {"0VENTS", 'F'}, + {"0VENTV", 
'F'}, {"0VENUE", 'F'}, {"0VEOKN", 'F'}, + {"0VES;T", 'F'}, {"0VESC", 'F'}, {"0VESO(", 'F'}, {"0VESO1", 'F'}, @@ -7720,6 +8035,12 @@ static const keyword_t sql_keywords[] = { {"0VESON", 'F'}, {"0VESOS", 'F'}, {"0VESOV", 'F'}, + {"0VEST(", 'F'}, + {"0VEST1", 'F'}, + {"0VESTF", 'F'}, + {"0VESTN", 'F'}, + {"0VESTS", 'F'}, + {"0VESTV", 'F'}, {"0VESUE", 'F'}, {"0VEU(1", 'F'}, {"0VEU(F", 'F'}, @@ -7732,19 +8053,23 @@ static const keyword_t sql_keywords[] = { {"0VEUEF", 'F'}, {"0VEUEK", 'F'}, {"0VEUF(", 'F'}, - {"0VEUN,", 'F'}, - {"0VEUNC", 'F'}, - {"0VEUNO", 'F'}, {"0VEUS,", 'F'}, {"0VEUSC", 'F'}, {"0VEUSO", 'F'}, {"0VEUV,", 'F'}, {"0VEUVC", 'F'}, {"0VEUVO", 'F'}, + {"0VEV;T", 'F'}, {"0VEVC", 'F'}, {"0VEVO(", 'F'}, {"0VEVOF", 'F'}, {"0VEVOS", 'F'}, + {"0VEVT(", 'F'}, + {"0VEVT1", 'F'}, + {"0VEVTF", 'F'}, + {"0VEVTN", 'F'}, + {"0VEVTS", 'F'}, + {"0VEVTV", 'F'}, {"0VEVUE", 'F'}, {"0VF()1", 'F'}, {"0VF()F", 'F'}, @@ -7802,6 +8127,8 @@ static const keyword_t sql_keywords[] = { {"0VK)EN", 'F'}, {"0VK)ES", 'F'}, {"0VK)EV", 'F'}, + {"0VK)F(", 'F'}, + {"0VK)O(", 'F'}, {"0VK)OF", 'F'}, {"0VK)UE", 'F'}, {"0VK1", 'F'}, @@ -7947,6 +8274,7 @@ static const keyword_t sql_keywords[] = { {"0VO(EF", 'F'}, {"0VO(EK", 'F'}, {"0VO(EN", 'F'}, + {"0VO(EO", 'F'}, {"0VO(ES", 'F'}, {"0VO(EV", 'F'}, {"0VO(F(", 'F'}, @@ -8012,6 +8340,7 @@ static const keyword_t sql_keywords[] = { {"0VOS)B", 'F'}, {"0VOS)C", 'F'}, {"0VOS)E", 'F'}, + {"0VOS)F", 'F'}, {"0VOS)K", 'F'}, {"0VOS)O", 'F'}, {"0VOS)U", 'F'}, @@ -8020,7 +8349,6 @@ static const keyword_t sql_keywords[] = { {"0VOS1(", 'F'}, {"0VOS1F", 'F'}, {"0VOS1N", 'F'}, - {"0VOS1O", 'F'}, {"0VOS1S", 'F'}, {"0VOS1U", 'F'}, {"0VOS1V", 'F'}, @@ -8061,6 +8389,14 @@ static const keyword_t sql_keywords[] = { {"0VOSKS", 'F'}, {"0VOSKU", 'F'}, {"0VOSKV", 'F'}, + {"0VOST(", 'F'}, + {"0VOST1", 'F'}, + {"0VOSTE", 'F'}, + {"0VOSTF", 'F'}, + {"0VOSTN", 'F'}, + {"0VOSTS", 'F'}, + {"0VOSTT", 'F'}, + {"0VOSTV", 'F'}, {"0VOSU", 'F'}, {"0VOSU(", 'F'}, {"0VOSU1", 'F'}, @@ 
-8069,7 +8405,6 @@ static const keyword_t sql_keywords[] = { {"0VOSUE", 'F'}, {"0VOSUF", 'F'}, {"0VOSUK", 'F'}, - {"0VOSUN", 'F'}, {"0VOSUO", 'F'}, {"0VOSUS", 'F'}, {"0VOSUT", 'F'}, @@ -8082,6 +8417,96 @@ static const keyword_t sql_keywords[] = { {"0VOU(E", 'F'}, {"0VOUEK", 'F'}, {"0VOUEN", 'F'}, + {"0VT(1)", 'F'}, + {"0VT(1O", 'F'}, + {"0VT(F(", 'F'}, + {"0VT(N)", 'F'}, + {"0VT(NO", 'F'}, + {"0VT(S)", 'F'}, + {"0VT(SO", 'F'}, + {"0VT(V)", 'F'}, + {"0VT(VO", 'F'}, + {"0VT1(F", 'F'}, + {"0VT1O(", 'F'}, + {"0VT1OF", 'F'}, + {"0VT1OS", 'F'}, + {"0VT1OV", 'F'}, + {"0VTE(1", 'F'}, + {"0VTE(F", 'F'}, + {"0VTE(N", 'F'}, + {"0VTE(S", 'F'}, + {"0VTE(V", 'F'}, + {"0VTE1N", 'F'}, + {"0VTE1O", 'F'}, + {"0VTEF(", 'F'}, + {"0VTEK(", 'F'}, + {"0VTEK1", 'F'}, + {"0VTEKF", 'F'}, + {"0VTEKN", 'F'}, + {"0VTEKS", 'F'}, + {"0VTEKV", 'F'}, + {"0VTENN", 'F'}, + {"0VTENO", 'F'}, + {"0VTESN", 'F'}, + {"0VTESO", 'F'}, + {"0VTEVN", 'F'}, + {"0VTEVO", 'F'}, + {"0VTF()", 'F'}, + {"0VTF(1", 'F'}, + {"0VTF(F", 'F'}, + {"0VTF(N", 'F'}, + {"0VTF(S", 'F'}, + {"0VTF(V", 'F'}, + {"0VTN(1", 'F'}, + {"0VTN(F", 'F'}, + {"0VTN(S", 'F'}, + {"0VTN(V", 'F'}, + {"0VTN1C", 'F'}, + {"0VTN1O", 'F'}, + {"0VTN;E", 'F'}, + {"0VTN;N", 'F'}, + {"0VTN;T", 'F'}, + {"0VTNE(", 'F'}, + {"0VTNE1", 'F'}, + {"0VTNEF", 'F'}, + {"0VTNEN", 'F'}, + {"0VTNES", 'F'}, + {"0VTNEV", 'F'}, + {"0VTNF(", 'F'}, + {"0VTNKN", 'F'}, + {"0VTNN:", 'F'}, + {"0VTNNC", 'F'}, + {"0VTNNO", 'F'}, + {"0VTNO(", 'F'}, + {"0VTNOF", 'F'}, + {"0VTNOS", 'F'}, + {"0VTNOV", 'F'}, + {"0VTNSC", 'F'}, + {"0VTNSO", 'F'}, + {"0VTNT(", 'F'}, + {"0VTNT1", 'F'}, + {"0VTNTF", 'F'}, + {"0VTNTN", 'F'}, + {"0VTNTS", 'F'}, + {"0VTNTV", 'F'}, + {"0VTNVC", 'F'}, + {"0VTNVO", 'F'}, + {"0VTS(F", 'F'}, + {"0VTSO(", 'F'}, + {"0VTSO1", 'F'}, + {"0VTSOF", 'F'}, + {"0VTSON", 'F'}, + {"0VTSOS", 'F'}, + {"0VTSOV", 'F'}, + {"0VTTNE", 'F'}, + {"0VTTNK", 'F'}, + {"0VTTNN", 'F'}, + {"0VTTNT", 'F'}, + {"0VTV(1", 'F'}, + {"0VTV(F", 'F'}, + {"0VTVO(", 'F'}, + {"0VTVOF", 'F'}, + 
{"0VTVOS", 'F'}, {"0VU", 'F'}, {"0VU(1)", 'F'}, {"0VU(1O", 'F'}, @@ -8166,7 +8591,6 @@ static const keyword_t sql_keywords[] = { {"0VUENU", 'F'}, {"0VUEOK", 'F'}, {"0VUEON", 'F'}, - {"0VUEOO", 'F'}, {"0VUES", 'F'}, {"0VUES&", 'F'}, {"0VUES(", 'F'}, @@ -8202,30 +8626,6 @@ static const keyword_t sql_keywords[] = { {"0VUF(S", 'F'}, {"0VUF(V", 'F'}, {"0VUK(E", 'F'}, - {"0VUN(1", 'F'}, - {"0VUN(F", 'F'}, - {"0VUN(S", 'F'}, - {"0VUN(V", 'F'}, - {"0VUN,(", 'F'}, - {"0VUN,F", 'F'}, - {"0VUN1(", 'F'}, - {"0VUN1,", 'F'}, - {"0VUN1O", 'F'}, - {"0VUNC", 'F'}, - {"0VUNE(", 'F'}, - {"0VUNE1", 'F'}, - {"0VUNEF", 'F'}, - {"0VUNEN", 'F'}, - {"0VUNES", 'F'}, - {"0VUNEV", 'F'}, - {"0VUNF(", 'F'}, - {"0VUNO(", 'F'}, - {"0VUNOF", 'F'}, - {"0VUNOS", 'F'}, - {"0VUNOV", 'F'}, - {"0VUNS(", 'F'}, - {"0VUNS,", 'F'}, - {"0VUNSO", 'F'}, {"0VUO(E", 'F'}, {"0VUON(", 'F'}, {"0VUON1", 'F'}, @@ -8243,7 +8643,9 @@ static const keyword_t sql_keywords[] = { {"0VUTN(", 'F'}, {"0VUTN1", 'F'}, {"0VUTNF", 'F'}, + {"0VUTNN", 'F'}, {"0VUTNS", 'F'}, + {"0VUTNV", 'F'}, {"0VUV,(", 'F'}, {"0VUV,F", 'F'}, {"0VUVC", 'F'}, @@ -8264,7 +8666,6 @@ static const keyword_t sql_keywords[] = { {"ABS", 'f'}, {"ACCESSIBLE", 'k'}, {"ACOS", 'f'}, - {"ADD", 'k'}, {"ADDDATE", 'f'}, {"ADDTIME", 'f'}, {"AES_DECRYPT", 'f'}, @@ -8310,6 +8711,10 @@ static const keyword_t sql_keywords[] = { {"AVG", 'f'}, {"BEFORE", 'k'}, {"BEGIN", 'T'}, + {"BEGIN DECLARE", 'T'}, + {"BEGIN GOTO", 'T'}, + {"BEGIN TRY", 'T'}, + {"BEGIN TRY DECLARE", 'T'}, {"BENCHMARK", 'f'}, {"BETWEEN", 'o'}, {"BIGINT", 't'}, @@ -8361,7 +8766,7 @@ static const keyword_t sql_keywords[] = { {"CHAR_LENGTH", 'f'}, {"CHDIR", 'f'}, {"CHDRIVE", 'f'}, - {"CHECK", 'k'}, + {"CHECK", 'n'}, {"CHECKSUM_AGG", 'f'}, {"CHOOSE", 'f'}, {"CHR", 'f'}, @@ -8544,6 +8949,8 @@ static const keyword_t sql_keywords[] = { {"FILEGROUP_NAME", 'f'}, {"FILELEN", 'f'}, {"FILEPROPERTY", 'f'}, + {"FILETOBLOB", 'f'}, + {"FILETOCLOB", 'f'}, {"FILE_ID", 'f'}, {"FILE_IDEX", 'f'}, {"FILE_NAME", 'f'}, @@ 
-8676,6 +9083,7 @@ static const keyword_t sql_keywords[] = { {"IS_USED_LOCK", 'f'}, {"ITERATE", 'k'}, {"JOIN", 'k'}, + {"JSON_KEYS", 'f'}, {"JULIANDAY", 'f'}, {"JUSTIFY_DAYS", 'f'}, {"JUSTIFY_HOURS", 'f'}, @@ -8694,7 +9102,7 @@ static const keyword_t sql_keywords[] = { {"LEADING", 'k'}, {"LEAST", 'f'}, {"LEAVE", 'k'}, - {"LEFT", 'n'}, + {"LEFT", 'f'}, {"LEFT JOIN", 'k'}, {"LEFT OUTER", 'k'}, {"LEFT OUTER JOIN", 'k'}, @@ -8818,7 +9226,7 @@ static const keyword_t sql_keywords[] = { {"ORDER BY", 'B'}, {"ORIGINAL_DB_NAME", 'f'}, {"ORIGINAL_LOGIN", 'f'}, - {"OUT", 'k'}, + {"OUT", 'n'}, {"OUTER", 'n'}, {"OUTFILE", 'k'}, {"OVERLAPS", 'f'}, @@ -9037,7 +9445,7 @@ static const keyword_t sql_keywords[] = { {"SYSTEM_USER", 'f'}, {"SYSUSERS", 'k'}, {"SYSUTCDATETME", 'f'}, - {"TABLE", 'k'}, + {"TABLE", 'n'}, {"TAN", 'f'}, {"TERMINATED", 'k'}, {"TERTIARY_WEIGHTS", 'f'}, @@ -9081,6 +9489,7 @@ static const keyword_t sql_keywords[] = { {"TRUE", '1'}, {"TRUNC", 'f'}, {"TRUNCATE", 'f'}, + {"TRY", 'T'}, {"TRY_CAST", 'f'}, {"TRY_CONVERT", 'f'}, {"TRY_PARSE", 'f'}, @@ -9217,5 +9626,5 @@ static const keyword_t sql_keywords[] = { {"||", '&'}, {"~*", 'o'}, }; -static const size_t sql_keywords_sz = 9049; +static const size_t sql_keywords_sz = 9330; #endif diff --git a/apache2/libinjection/libinjection_xss.c b/apache2/libinjection/libinjection_xss.c index 2807c22f08..e89b336ed9 100644 --- a/apache2/libinjection/libinjection_xss.c +++ b/apache2/libinjection/libinjection_xss.c @@ -6,13 +6,6 @@ #include #include -#ifndef DEBUG -#include -#define TRACE() printf("%s:%d\n", __FUNCTION__, __LINE__) -#else -#define TRACE() -#endif - typedef enum attribute { TYPE_NONE , TYPE_BLACK /* ban always */ @@ -37,109 +30,109 @@ typedef struct stringtype { static const int gsHexDecodeMap[256] = { - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 
256, 256, 256, 256, 256, 256, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 256, 256, - 256, 256, 256, 256, 256, 10, 11, 12, 13, 14, 15, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 10, 11, 12, 13, 14, 15, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, - 256, 256, 256, 256 + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 256, 256, + 256, 256, 256, 256, 256, 10, 11, 12, 13, 14, 15, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 10, 11, 12, 13, 14, 15, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 
256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, + 256, 256, 256, 256 }; static int html_decode_char_at(const char* src, size_t len, size_t* consumed) { - int val = 0; - size_t i; - int ch; - - if (len == 0 || src == NULL) { - *consumed = 0; - return -1; - } - - *consumed = 1; - if (*src != '&' || len < 2) { - return (unsigned char)(*src); - } - - - if (*(src+1) != '#') { - /* normally this would be for named entities - * but for this case we don't actually care - */ - return '&'; - } - - if (*(src+2) == 'x' || *(src+2) == 'X') { - ch = (unsigned char) (*(src+3)); - ch = gsHexDecodeMap[ch]; - if (ch == 256) { - /* degenerate case '&#[?]' */ - return '&'; + int val = 0; + size_t i; + int ch; + + if (len == 0 || src == NULL) { + *consumed = 0; + return -1; } - val = ch; - i = 4; - while (i < len) { - ch = (unsigned char) src[i]; - if (ch == ';') { - *consumed = i + 1; - return val; - } - ch = gsHexDecodeMap[ch]; - if (ch == 256) { - *consumed = i; - return val; - } - val = (val * 16) + ch; - if (val > 0x1000FF) { - return '&'; - } - ++i; + + *consumed = 1; + if (*src != '&' || len < 2) { + return (unsigned char)(*src); } - *consumed = i; - return val; - } else { - i = 2; - ch = (unsigned char) src[i]; - if (ch < '0' || ch > '9') { - return '&'; + + + if (*(src+1) != '#') { + /* normally this would be for named entities + * but for this case we don't actually care + */ + return '&'; } - val = ch - '0'; - i += 1; - while (i < len) { - ch = (unsigned char) src[i]; - if (ch == ';') { - *consumed = i + 1; - return val; - } - if (ch < '0' || ch > '9') { - *consumed = i; - return val; - } - val = (val * 10) + (ch - '0'); - if (val > 0x1000FF) { - return '&'; - } - ++i; + + if 
(*(src+2) == 'x' || *(src+2) == 'X') { + ch = (unsigned char) (*(src+3)); + ch = gsHexDecodeMap[ch]; + if (ch == 256) { + /* degenerate case '&#[?]' */ + return '&'; + } + val = ch; + i = 4; + while (i < len) { + ch = (unsigned char) src[i]; + if (ch == ';') { + *consumed = i + 1; + return val; + } + ch = gsHexDecodeMap[ch]; + if (ch == 256) { + *consumed = i; + return val; + } + val = (val * 16) + ch; + if (val > 0x1000FF) { + return '&'; + } + ++i; + } + *consumed = i; + return val; + } else { + i = 2; + ch = (unsigned char) src[i]; + if (ch < '0' || ch > '9') { + return '&'; + } + val = ch - '0'; + i += 1; + while (i < len) { + ch = (unsigned char) src[i]; + if (ch == ';') { + *consumed = i + 1; + return val; + } + if (ch < '0' || ch > '9') { + *consumed = i; + return val; + } + val = (val * 10) + (ch - '0'); + if (val > 0x1000FF) { + return '&'; + } + ++i; + } + *consumed = i; + return val; } - *consumed = i; - return val; - } } @@ -157,7 +150,7 @@ static stringtype_t BLACKATTR[] = { , { "DATASRC", TYPE_BLACK } /* IE */ , { "DYNSRC", TYPE_ATTR_URL } /* Obsolete img attribute */ , { "FILTER", TYPE_STYLE } /* Opera, SVG inline style */ - , { "FORMACTION", TYPE_ATTR_URL } /* HTML5 */ + , { "FORMACTION", TYPE_ATTR_URL } /* HTML 5 */ , { "FOLDER", TYPE_ATTR_URL } /* Only on A tags, IE-only */ , { "FROM", TYPE_ATTR_URL } /* SVG */ , { "HANDLER", TYPE_ATTR_URL } /* SVG Tiny, Opera */ @@ -173,20 +166,20 @@ static stringtype_t BLACKATTR[] = { }; /* xmlns */ -/* xml-stylesheet > , */ +/* `xml-stylesheet` > , */ /* -static const char* BLACKATTR[] = { - "ATTRIBUTENAME", - "BACKGROUND", - "DATAFORMATAS", - "HREF", - "SCROLL", - "SRC", - "STYLE", - "SRCDOC", - NULL -}; + static const char* BLACKATTR[] = { + "ATTRIBUTENAME", + "BACKGROUND", + "DATAFORMATAS", + "HREF", + "SCROLL", + "SRC", + "STYLE", + "SRCDOC", + NULL + }; */ static const char* BLACKTAG[] = { @@ -220,36 +213,36 @@ static const char* BLACKTAG[] = { static int cstrcasecmp_with_null(const char *a, const char *b, 
size_t n) { - char ca; - char cb; - /* printf("Comparing to %s %.*s\n", a, (int)n, b); */ - while (n-- > 0) { - cb = *b++; - if (cb == '\0') continue; + char ca; + char cb; + /* printf("Comparing to %s %.*s\n", a, (int)n, b); */ + while (n-- > 0) { + cb = *b++; + if (cb == '\0') continue; - ca = *a++; + ca = *a++; - if (cb >= 'a' && cb <= 'z') { - cb -= 0x20; - } - /* printf("Comparing %c vs %c with %d left\n", ca, cb, (int)n); */ - if (ca != cb) { - return 1; + if (cb >= 'a' && cb <= 'z') { + cb -= 0x20; + } + /* printf("Comparing %c vs %c with %d left\n", ca, cb, (int)n); */ + if (ca != cb) { + return 1; + } } - } - if (*a == 0) { - /* printf(" MATCH \n"); */ - return 0; - } else { - return 1; - } + if (*a == 0) { + /* printf(" MATCH \n"); */ + return 0; + } else { + return 1; + } } /* - * Does an HTML encoded binary string (const char*, lenght) start with - * a all uppercase c-string (null terminated), case insenstive! - * + * Does an HTML encoded binary string (const char*, length) start with + * a all uppercase c-string (null terminated), case insensitive! + * * also ignore any embedded nulls in the HTML string! 
* * return 1 if match / starts with @@ -257,47 +250,47 @@ static int cstrcasecmp_with_null(const char *a, const char *b, size_t n) */ static int htmlencode_startswith(const char *a, const char *b, size_t n) { - size_t consumed; - int cb; - int first = 1; - /* printf("Comparing %s with %.*s\n", a,(int)n,b); */ + size_t consumed; + int cb; + int first = 1; + /* printf("Comparing %s with %.*s\n", a,(int)n,b); */ while (n > 0) { - if (*a == 0) { - /* printf("Match EOL!\n"); */ - return 1; - } - cb = html_decode_char_at(b, n, &consumed); - b += consumed; - n -= consumed; - - if (first && cb <= 32) { - /* ignore all leading whitespace and control characters */ - continue; - } - first = 0; + if (*a == 0) { + /* printf("Match EOL!\n"); */ + return 1; + } + cb = html_decode_char_at(b, n, &consumed); + b += consumed; + n -= consumed; + + if (first && cb <= 32) { + /* ignore all leading whitespace and control characters */ + continue; + } + first = 0; if (cb == 0) { - /* always ignore null characters in user input */ - continue; - } + /* always ignore null characters in user input */ + continue; + } if (cb == 10) { - /* always ignore vtab characters in user input */ - /* who allows this?? */ - continue; - } + /* always ignore vertical tab characters in user input */ + /* who allows this?? */ + continue; + } if (cb >= 'a' && cb <= 'z') { - /* upcase */ + /* upcase */ cb -= 0x20; } if (*a != (char) cb) { - /* printf(" %c != %c\n", *a, cb); */ - /* mismatch */ - return 0; + /* printf(" %c != %c\n", *a, cb); */ + /* mismatch */ + return 0; } - a++; + a++; } return (*a == 0) ? 
1 : 0; @@ -313,8 +306,8 @@ static int is_black_tag(const char* s, size_t len) black = BLACKTAG; while (*black != NULL) { - if (cstrcasecmp_with_null(*black, s, len) == 0) { - /* printf("Got black tag %s\n", *black); */ + if (cstrcasecmp_with_null(*black, s, len) == 0) { + /* printf("Got black tag %s\n", *black); */ return 1; } black += 1; @@ -324,7 +317,7 @@ static int is_black_tag(const char* s, size_t len) if ((s[0] == 's' || s[0] == 'S') && (s[1] == 'v' || s[1] == 'V') && (s[2] == 'g' || s[2] == 'G')) { - /* printf("Got SVG tag \n"); */ + /* printf("Got SVG tag \n"); */ return 1; } @@ -332,7 +325,7 @@ static int is_black_tag(const char* s, size_t len) if ((s[0] == 'x' || s[0] == 'X') && (s[1] == 's' || s[1] == 'S') && (s[2] == 'l' || s[2] == 'L')) { - /* printf("Got XSL tag\n"); */ + /* printf("Got XSL tag\n"); */ return 1; } @@ -347,9 +340,9 @@ static attribute_t is_black_attr(const char* s, size_t len) return TYPE_NONE; } - /* javascript on.* */ + /* JavaScript on.* */ if ((s[0] == 'o' || s[0] == 'O') && (s[1] == 'n' || s[1] == 'N')) { - /* printf("Got javascript on- attribute name\n"); */ + /* printf("Got JavaScript on- attribute name\n"); */ return TYPE_BLACK; } @@ -357,7 +350,7 @@ static attribute_t is_black_attr(const char* s, size_t len) if (len >= 5) { /* XMLNS can be used to create arbitrary tags */ if (cstrcasecmp_with_null("XMLNS", s, 5) == 0 || cstrcasecmp_with_null("XLINK", s, 5) == 0) { - /* printf("Got XMLNS and XLINK tags\n"); */ + /* printf("Got XMLNS and XLINK tags\n"); */ return TYPE_BLACK; } } @@ -365,7 +358,7 @@ static attribute_t is_black_attr(const char* s, size_t len) black = BLACKATTR; while (black->name != NULL) { if (cstrcasecmp_with_null(black->name, s, len) == 0) { - /* printf("Got banned attribute name %s\n", black->name); */ + /* printf("Got banned attribute name %s\n", black->name); */ return black->atype; } black += 1; @@ -387,20 +380,18 @@ static int is_black_url(const char* s, size_t len) static const char* javascript_url = 
"JAVA"; /* skip whitespace */ - while (len > 0) { + while (len > 0 && (*s <= 32 || *s >= 127)) { /* * HEY: this is a signed character. * We are intentionally skipping high-bit characters too - * since they are not ascii, and Opera sometimes uses UTF8 whitespace + * since they are not ASCII, and Opera sometimes uses UTF-8 whitespace. + * + * Also in EUC-JP some of the high bytes are just ignored. */ - if (*s <= 32) { - ++s; - --len; - } - break; + ++s; + --len; } - if (htmlencode_startswith(data_url, s, len)) { return 1; } @@ -442,16 +433,16 @@ int libinjection_is_xss(const char* s, size_t len, int flags) /* * IE6,7,8 parsing works a bit differently so * a whole