/*
 * JSON lexer
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/qmp/json-lexer.h"
#include "qapi/qmp/json-streamer.h"

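/* Cap on the size of any single token, enforced in json_lexer_feed_char() */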
#define MAX_TOKEN_SIZE (64ULL << 20)

/*
 * From RFC 8259 "The JavaScript Object Notation (JSON) Data
 * Interchange Format", with [comments in brackets]:
 *
 * The set of tokens includes six structural characters, strings,
 * numbers, and three literal names.
 *
 * These are the six structural characters:
 *
 *    begin-array     = ws %x5B ws  ; [ left square bracket
 *    begin-object    = ws %x7B ws  ; { left curly bracket
 *    end-array       = ws %x5D ws  ; ] right square bracket
 *    end-object      = ws %x7D ws  ; } right curly bracket
 *    name-separator  = ws %x3A ws  ; : colon
 *    value-separator = ws %x2C ws  ; , comma
 *
 * Insignificant whitespace is allowed before or after any of the six
 * structural characters.
 * [This lexer accepts it before or after any token, which is actually
 * the same, as the grammar always has structural characters between
 * other tokens.]
 *
 *    ws = *(
 *           %x20 /              ; Space
 *           %x09 /              ; Horizontal tab
 *           %x0A /              ; Line feed or New line
 *           %x0D )              ; Carriage return
 *
 * [...] three literal names:
 *    false null true
 * [This lexer accepts [a-z]+, and leaves rejecting unknown literal
 * names to the parser.]
 *
 * [Numbers:]
 *
 *    number = [ minus ] int [ frac ] [ exp ]
 *    decimal-point = %x2E       ; .
 *    digit1-9 = %x31-39         ; 1-9
 *    e = %x65 / %x45            ; e E
 *    exp = e [ minus / plus ] 1*DIGIT
 *    frac = decimal-point 1*DIGIT
 *    int = zero / ( digit1-9 *DIGIT )
 *    minus = %x2D               ; -
 *    plus = %x2B                ; +
 *    zero = %x30                ; 0
 *
 * [Strings:]
 *    string = quotation-mark *char quotation-mark
 *
 *    char = unescaped /
 *        escape (
 *            %x22 /          ; "    quotation mark  U+0022
 *            %x5C /          ; \    reverse solidus U+005C
 *            %x2F /          ; /    solidus         U+002F
 *            %x62 /          ; b    backspace       U+0008
 *            %x66 /          ; f    form feed       U+000C
 *            %x6E /          ; n    line feed       U+000A
 *            %x72 /          ; r    carriage return U+000D
 *            %x74 /          ; t    tab             U+0009
 *            %x75 4HEXDIG )  ; uXXXX                U+XXXX
 *    escape = %x5C              ; \
 *    quotation-mark = %x22      ; "
 *    unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
 *    [This lexer accepts any non-control character after escape, and
 *    leaves rejecting invalid ones to the parser.]
 *
 *
 * Extensions over RFC 8259:
 * - Extra escape sequence in strings:
 *   0x27 (apostrophe) is recognized after escape, too
 * - Single-quoted strings:
 *   Like double-quoted strings, except they're delimited by %x27
 *   (apostrophe) instead of %x22 (quotation mark), and can't contain
 *   unescaped apostrophe, but can contain unescaped quotation mark.
 * - Interpolation:
 *   interpolation = %((l|ll|I64)[du]|[ipsf])
 *
 * Note:
 * - Input must be encoded in modified UTF-8.
 * - Decoding and validating is left to the parser.
 */

enum json_lexer_state {
    IN_ERROR = 0,               /* must really be 0, see json_lexer[] */
    IN_DQ_STRING_ESCAPE,
    IN_DQ_STRING,
    IN_SQ_STRING_ESCAPE,
    IN_SQ_STRING,
    IN_ZERO,
    IN_DIGITS,
    IN_DIGIT,
    IN_EXP_E,
    IN_MANTISSA,
    IN_MANTISSA_DIGITS,
    IN_NONZERO_NUMBER,
    IN_NEG_NONZERO_NUMBER,
    IN_KEYWORD,
    IN_INTERP,
    IN_INTERP_L,
    IN_INTERP_LL,
    IN_INTERP_I,
    IN_INTERP_I6,
    IN_INTERP_I64,
    IN_WHITESPACE,
    IN_START,
};

QEMU_BUILD_BUG_ON((int)JSON_MIN <= (int)IN_START);

#define TERMINAL(state) [0 ... 0x7F] = (state)

/* Return whether TERMINAL is a terminal state and the transition to it
   from OLD_STATE required lookahead.  This happens whenever the table
   below uses the TERMINAL macro. */
#define TERMINAL_NEEDED_LOOKAHEAD(old_state, terminal) \
    (terminal != IN_ERROR && json_lexer[(old_state)][0] == (terminal))

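/*
 * Transition table: json_lexer[current state][next input byte] is either
 * the next lexer state, or, for values >= JSON_MIN, the type of token to
 * emit (the build-time assertion above keeps token types and lexer states
 * from overlapping).
 *
 * For example, lexing {"ab": 7} hands the tokens
 *     JSON_LCURLY "{", JSON_STRING "\"ab\"", JSON_COLON ":",
 *     JSON_INTEGER "7", JSON_RCURLY "}"
 * to json_message_process_token(); the whitespace is dropped (JSON_SKIP).
 */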
static const uint8_t json_lexer[][256] =  {
    /* Relies on default initialization to IN_ERROR! */

    /* double quote string */
    [IN_DQ_STRING_ESCAPE] = {
        [0x20 ... 0xFD] = IN_DQ_STRING,
    },
    [IN_DQ_STRING] = {
        [0x20 ... 0xFD] = IN_DQ_STRING,
        ['\\'] = IN_DQ_STRING_ESCAPE,
        ['"'] = JSON_STRING,
    },

    /* single quote string */
    [IN_SQ_STRING_ESCAPE] = {
        [0x20 ... 0xFD] = IN_SQ_STRING,
    },
    [IN_SQ_STRING] = {
        [0x20 ... 0xFD] = IN_SQ_STRING,
        ['\\'] = IN_SQ_STRING_ESCAPE,
        ['\''] = JSON_STRING,
    },

    /* Zero */
    [IN_ZERO] = {
        TERMINAL(JSON_INTEGER),
        ['0' ... '9'] = IN_ERROR,
        ['.'] = IN_MANTISSA,
    },

    /* Float */
    [IN_DIGITS] = {
        TERMINAL(JSON_FLOAT),
        ['0' ... '9'] = IN_DIGITS,
    },

    [IN_DIGIT] = {
        ['0' ... '9'] = IN_DIGITS,
    },

    [IN_EXP_E] = {
        ['-'] = IN_DIGIT,
        ['+'] = IN_DIGIT,
        ['0' ... '9'] = IN_DIGITS,
    },

    [IN_MANTISSA_DIGITS] = {
        TERMINAL(JSON_FLOAT),
        ['0' ... '9'] = IN_MANTISSA_DIGITS,
        ['e'] = IN_EXP_E,
        ['E'] = IN_EXP_E,
    },

    [IN_MANTISSA] = {
        ['0' ... '9'] = IN_MANTISSA_DIGITS,
    },

    /* Number */
    [IN_NONZERO_NUMBER] = {
        TERMINAL(JSON_INTEGER),
        ['0' ... '9'] = IN_NONZERO_NUMBER,
        ['e'] = IN_EXP_E,
        ['E'] = IN_EXP_E,
        ['.'] = IN_MANTISSA,
    },

    [IN_NEG_NONZERO_NUMBER] = {
        ['0'] = IN_ZERO,
        ['1' ... '9'] = IN_NONZERO_NUMBER,
    },

    /* keywords */
    [IN_KEYWORD] = {
        TERMINAL(JSON_KEYWORD),
        ['a' ... 'z'] = IN_KEYWORD,
    },

    /* whitespace */
    [IN_WHITESPACE] = {
        TERMINAL(JSON_SKIP),
        [' '] = IN_WHITESPACE,
        ['\t'] = IN_WHITESPACE,
        ['\r'] = IN_WHITESPACE,
        ['\n'] = IN_WHITESPACE,
    },

    /* interpolation */
    [IN_INTERP_LL] = {
        ['d'] = JSON_INTERP,
        ['u'] = JSON_INTERP,
    },

    [IN_INTERP_L] = {
        ['d'] = JSON_INTERP,
        ['l'] = IN_INTERP_LL,
        ['u'] = JSON_INTERP,
    },

    [IN_INTERP_I64] = {
        ['d'] = JSON_INTERP,
        ['u'] = JSON_INTERP,
    },

    [IN_INTERP_I6] = {
        ['4'] = IN_INTERP_I64,
    },

    [IN_INTERP_I] = {
        ['6'] = IN_INTERP_I6,
    },

    [IN_INTERP] = {
        ['d'] = JSON_INTERP,
        ['i'] = JSON_INTERP,
        ['p'] = JSON_INTERP,
        ['s'] = JSON_INTERP,
        ['u'] = JSON_INTERP,
        ['f'] = JSON_INTERP,
        ['l'] = IN_INTERP_L,
        ['I'] = IN_INTERP_I,
    },

    /* top level rule */
    [IN_START] = {
        ['"'] = IN_DQ_STRING,
        ['\''] = IN_SQ_STRING,
        ['0'] = IN_ZERO,
        ['1' ... '9'] = IN_NONZERO_NUMBER,
        ['-'] = IN_NEG_NONZERO_NUMBER,
        ['{'] = JSON_LCURLY,
        ['}'] = JSON_RCURLY,
        ['['] = JSON_LSQUARE,
        [']'] = JSON_RSQUARE,
        [','] = JSON_COMMA,
        [':'] = JSON_COLON,
        ['a' ... 'z'] = IN_KEYWORD,
        ['%'] = IN_INTERP,
        [' '] = IN_WHITESPACE,
        ['\t'] = IN_WHITESPACE,
        ['\r'] = IN_WHITESPACE,
        ['\n'] = IN_WHITESPACE,
    },
};

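/*
 * The lexer only splits the byte stream into tokens and tracks the
 * current position (x = column, y = line); each complete token is
 * handed to json_message_process_token() for the streamer/parser.
 */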
void json_lexer_init(JSONLexer *lexer)
{
    lexer->state = IN_START;
    lexer->token = g_string_sized_new(3);
    lexer->x = lexer->y = 0;
}

static void json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
{
    int char_consumed, new_state;

    lexer->x++;
    if (ch == '\n') {
        lexer->x = 0;
        lexer->y++;
    }

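    /*
     * Look up the transition for this byte.  When a TERMINAL() entry
     * fires, the byte that triggered it is not part of the token just
     * emitted, so it is not consumed: the loop runs again and feeds the
     * same byte to the fresh IN_START state.
     */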
    do {
        assert(lexer->state <= ARRAY_SIZE(json_lexer));
        new_state = json_lexer[lexer->state][(uint8_t)ch];
        char_consumed = !TERMINAL_NEEDED_LOOKAHEAD(lexer->state, new_state);
        if (char_consumed && !flush) {
            g_string_append_c(lexer->token, ch);
        }

        switch (new_state) {
        case JSON_LCURLY:
        case JSON_RCURLY:
        case JSON_LSQUARE:
        case JSON_RSQUARE:
        case JSON_COLON:
        case JSON_COMMA:
        case JSON_INTERP:
        case JSON_INTEGER:
        case JSON_FLOAT:
        case JSON_KEYWORD:
        case JSON_STRING:
            json_message_process_token(lexer, lexer->token, new_state,
                                       lexer->x, lexer->y);
            /* fall through */
        case JSON_SKIP:
            g_string_truncate(lexer->token, 0);
            new_state = IN_START;
            break;
        case IN_ERROR:
            /* XXX: To avoid having previous bad input leaving the parser in an
             * unresponsive state where we consume unpredictable amounts of
             * subsequent "good" input, percolate this error state up to the
             * tokenizer/parser by forcing a NULL object to be emitted, then
             * reset state.
             *
             * Also note that this handling is required for reliable channel
             * negotiation between QMP and the guest agent, since chr(0xFF)
             * is placed at the beginning of certain events to ensure proper
             * delivery when the channel is in an unknown state. chr(0xFF) is
             * never a valid ASCII/UTF-8 sequence, so this should reliably
             * induce an error/flush state.
             */
            json_message_process_token(lexer, lexer->token, JSON_ERROR,
                                       lexer->x, lexer->y);
            g_string_truncate(lexer->token, 0);
            new_state = IN_START;
            lexer->state = new_state;
            return;
        default:
            break;
        }
        lexer->state = new_state;
    } while (!char_consumed && !flush);

    /* Do not let a single token grow to an arbitrarily large size,
     * this is a security consideration.
     */
    if (lexer->token->len > MAX_TOKEN_SIZE) {
        json_message_process_token(lexer, lexer->token, lexer->state,
                                   lexer->x, lexer->y);
        g_string_truncate(lexer->token, 0);
        lexer->state = IN_START;
    }
}

void json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size)
{
    size_t i;

    for (i = 0; i < size; i++) {
        json_lexer_feed_char(lexer, buffer[i], false);
    }
}

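/*
 * Flushing feeds a NUL byte with @flush set: a state that has a terminal
 * emits the pending token, any other non-IN_START state emits JSON_ERROR.
 */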
void json_lexer_flush(JSONLexer *lexer)
{
    if (lexer->state != IN_START) {
        json_lexer_feed_char(lexer, 0, true);
    }
}

void json_lexer_destroy(JSONLexer *lexer)
{
    g_string_free(lexer->token, true);
}