Merge pull request #294 from nodemcu/json

Json branch merged to master
This commit is contained in:
zeroday 2015-03-18 13:11:11 +08:00
commit a01bda03dd
41 changed files with 8897 additions and 6 deletions


@ -7,6 +7,7 @@ version 0.9.5
###A lua based firmware for wifi-soc esp8266
Build on [ESP8266 sdk 0.9.5](http://bbs.espressif.com/viewtopic.php?f=5&t=154)<br />
Lua core based on [eLua project](http://www.eluaproject.net/)<br />
cjson based on [lua-cjson](https://github.com/mpx/lua-cjson)<br />
File system based on [spiffs](https://github.com/pellepl/spiffs)<br />
Open source development kit for NodeMCU [nodemcu-devkit](https://github.com/nodemcu/nodemcu-devkit)<br />
Flash tool for NodeMCU [nodemcu-flasher](https://github.com/nodemcu/nodemcu-flasher)<br />
@ -34,6 +35,10 @@ Tencent QQ group: 309957875<br />
- cross compiler (done)
# Change log
2015-03-17<br />
add cjson module; only cjson.encode() and cjson.decode() are implemented.<br />
read doc [here](https://github.com/nodemcu/nodemcu-firmware/blob/json/app/cjson/manual.txt)
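A minimal usage sketch (assuming a firmware image built with the cjson module; output key order may vary):<br />

```lua
-- decode JSON text into a Lua table, then encode a table back to JSON
local obj = cjson.decode('{"name":"nodemcu","ok":true}')
print(obj.name)                     -- nodemcu
print(cjson.encode({ value = 42 })) -- {"value":42}
```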
2015-03-15<br />
bugs fixed: #239, #273.<br />
reduce coap module memory usage, add coap module to default build.


@ -36,7 +36,8 @@ SUBDIRS= \
smart \
wofs \
modules \
spiffs \
cjson
endif # } PDIR
@ -84,6 +85,7 @@ COMPONENTS_eagle.app.v6 = \
smart/smart.a \
wofs/wofs.a \
spiffs/spiffs.a \
cjson/libcjson.a \
modules/libmodules.a
LINKFLAGS_eagle.app.v6 = \

app/cjson/CMakeLists.txt Normal file

@ -0,0 +1,76 @@
# If Lua is installed in a non-standard location, please set the LUA_DIR
# environment variable to point to prefix for the install. Eg:
# Unix: export LUA_DIR=/home/user/pkg
# Windows: set LUA_DIR=c:\lua51
project(lua-cjson C)
cmake_minimum_required(VERSION 2.6)
option(USE_INTERNAL_FPCONV "Use internal strtod() / g_fmt() code for performance")
option(MULTIPLE_THREADS "Support multi-threaded apps with internal fpconv - recommended" ON)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
FORCE)
endif()
find_package(Lua51 REQUIRED)
include_directories(${LUA_INCLUDE_DIR})
if(NOT USE_INTERNAL_FPCONV)
# Use libc number conversion routines (strtod(), sprintf())
set(FPCONV_SOURCES fpconv.c)
else()
# Use internal number conversion routines
add_definitions(-DUSE_INTERNAL_FPCONV)
set(FPCONV_SOURCES g_fmt.c dtoa.c)
include(TestBigEndian)
TEST_BIG_ENDIAN(IEEE_BIG_ENDIAN)
if(IEEE_BIG_ENDIAN)
add_definitions(-DIEEE_BIG_ENDIAN)
endif()
if(MULTIPLE_THREADS)
set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
find_package(Threads REQUIRED)
if(NOT CMAKE_USE_PTHREADS_INIT)
message(FATAL_ERROR
"Pthreads not found - required by MULTIPLE_THREADS option")
endif()
add_definitions(-DMULTIPLE_THREADS)
endif()
endif()
# Handle platforms missing isinf() macro (Eg, some Solaris systems).
include(CheckSymbolExists)
CHECK_SYMBOL_EXISTS(isinf math.h HAVE_ISINF)
if(NOT HAVE_ISINF)
add_definitions(-DUSE_INTERNAL_ISINF)
endif()
set(_MODULE_LINK "${CMAKE_THREAD_LIBS_INIT}")
get_filename_component(_lua_lib_dir ${LUA_LIBRARY} PATH)
if(APPLE)
set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS
"${CMAKE_SHARED_MODULE_CREATE_C_FLAGS} -undefined dynamic_lookup")
endif()
if(WIN32)
# Win32 modules need to be linked to the Lua library.
set(_MODULE_LINK ${LUA_LIBRARY} ${_MODULE_LINK})
set(_lua_module_dir "${_lua_lib_dir}")
# Windows sprintf()/strtod() handle NaN/inf differently. Not supported.
add_definitions(-DDISABLE_INVALID_NUMBERS)
else()
set(_lua_module_dir "${_lua_lib_dir}/lua/5.1")
endif()
add_library(cjson MODULE lua_cjson.c strbuf.c ${FPCONV_SOURCES})
set_target_properties(cjson PROPERTIES PREFIX "")
target_link_libraries(cjson ${_MODULE_LINK})
install(TARGETS cjson DESTINATION "${_lua_module_dir}")
# vi:ai et sw=4 ts=4:

app/cjson/LICENSE Normal file

@ -0,0 +1,21 @@
Copyright (c) 2010-2012 Mark Pulford <mark@kyne.com.au>
2015 Zeroday Hong <zeroday@nodemcu.com> nodemcu.com
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

app/cjson/Makefile Normal file

@ -0,0 +1,47 @@
#############################################################
# Required variables for each makefile
# Discard this section from all parent makefiles
# Expected variables (with automatic defaults):
# CSRCS (all "C" files in the dir)
# SUBDIRS (all subdirs with a Makefile)
# GEN_LIBS - list of libs to be generated ()
# GEN_IMAGES - list of images to be generated ()
# COMPONENTS_xxx - a list of libs/objs in the form
# subdir/lib to be extracted and rolled up into
# a generated lib/image xxx.a ()
#
ifndef PDIR
GEN_LIBS = libcjson.a
endif
#############################################################
# Configuration i.e. compile options etc.
# Target specific stuff (defines etc.) goes in here!
# Generally values applying to a tree are captured in the
# makefile at its root level - these are then overridden
# for a subtree within the makefile rooted therein
#
#DEFINES +=
#############################################################
# Recursion Magic - Don't touch this!!
#
# Each subtree potentially has an include directory
# corresponding to the common APIs applicable to modules
# rooted at that subtree. Accordingly, the INCLUDE PATH
# of a module can only contain the include directories up
# its parent path, and not its siblings
#
# Required for each makefile to inherit from the parent
#
INCLUDES := $(INCLUDES) -I $(PDIR)include
INCLUDES += -I ./
INCLUDES += -I ../libc
PDIR := ../$(PDIR)
sinclude $(PDIR)Makefile

app/cjson/THANKS Normal file

@ -0,0 +1,9 @@
The following people have helped with bug reports, testing and/or
suggestions:
- Louis-Philippe Perron (@loopole)
- Ondřej Jirman
- Steve Donovan <steve.j.donovan@gmail.com>
- Zhang "agentzh" Yichun <agentzh@gmail.com>
Thanks!


@ -0,0 +1,50 @@
parser:
- call parse_value
- next_token
? <EOF> nop.
parse_value:
- next_token
? <OBJ_BEGIN> call parse_object.
? <ARR_BEGIN> call parse_array.
? <STRING> push. return.
? <BOOLEAN> push. return.
? <NULL> push. return.
? <NUMBER> push. return.
parse_object:
- push table
- next_token
? <STRING> push.
- next_token
? <COLON> nop.
- call parse_value
- set table
- next_token
? <OBJ_END> return.
? <COMMA> loop parse_object.
parse_array:
- push table
- call parse_value
- table append
- next_token
? <COMMA> loop parse_array.
? ] return.
next_token:
- check next character
? { return <OBJ_BEGIN>
? } return <OBJ_END>
? [ return <ARR_BEGIN>
? ] return <ARR_END>
? , return <COMMA>
? : return <COLON>
? [-0-9] gobble number. return <NUMBER>
? " gobble string. return <STRING>
? [ \t\n] eat whitespace.
? n Check "null". return <NULL> or <UNKNOWN>
? t Check "true". return <BOOLEAN> or <UNKNOWN>
? f Check "false". return <BOOLEAN> or <UNKNOWN>
? . return <UNKNOWN>
? \0 return <END>
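Example walk-through (illustrative) for the input {"a": [1, 2]}:
- parse_value
  ? <OBJ_BEGIN> call parse_object.
    - push table
    - next_token ? <STRING> "a" push.
    - next_token ? <COLON> nop.
    - call parse_value ? <ARR_BEGIN> call parse_array.
      - push table
      - call parse_value ? <NUMBER> 1 push. table append.
      - next_token ? <COMMA> loop parse_array.
      - call parse_value ? <NUMBER> 2 push. table append.
      - next_token ? <ARR_END> return.
    - set table
    - next_token ? <OBJ_END> return.
- next_token ? <EOF> nop.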

app/cjson/dtoa.c Normal file
File diff suppressed because it is too large

app/cjson/dtoa_config.h Normal file

@ -0,0 +1,74 @@
#ifndef _DTOA_CONFIG_H
#define _DTOA_CONFIG_H
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
/* Ensure dtoa.c does not USE_LOCALE. Lua CJSON must not use locale
* aware conversion routines. */
#undef USE_LOCALE
/* dtoa.c should not touch errno, Lua CJSON does not use it, and it
* may not be threadsafe */
#define NO_ERRNO
#define Long int32_t
#define ULong uint32_t
#define Llong int64_t
#define ULLong uint64_t
#ifdef IEEE_BIG_ENDIAN
#define IEEE_MC68k
#else
#define IEEE_8087
#endif
#define MALLOC(n) xmalloc(n)
static void *xmalloc(size_t size)
{
void *p;
p = malloc(size);
if (!p) {
fprintf(stderr, "Out of memory");
abort();
}
return p;
}
#ifdef MULTIPLE_THREADS
/* Enable locking to support multi-threaded applications */
#include <pthread.h>
static pthread_mutex_t private_dtoa_lock[2] = {
PTHREAD_MUTEX_INITIALIZER,
PTHREAD_MUTEX_INITIALIZER
};
#define ACQUIRE_DTOA_LOCK(n) do { \
int r = pthread_mutex_lock(&private_dtoa_lock[n]); \
if (r) { \
fprintf(stderr, "pthread_mutex_lock failed with %d\n", r); \
abort(); \
} \
} while (0)
#define FREE_DTOA_LOCK(n) do { \
int r = pthread_mutex_unlock(&private_dtoa_lock[n]); \
if (r) { \
fprintf(stderr, "pthread_mutex_unlock failed with %d\n", r);\
abort(); \
} \
} while (0)
#endif /* MULTIPLE_THREADS */
#endif
#endif /* _DTOA_CONFIG_H */
/* vi:ai et sw=4 ts=4:
*/

app/cjson/fpconv.c Normal file

@ -0,0 +1,208 @@
/* fpconv - Floating point conversion routines
*
* Copyright (c) 2011-2012 Mark Pulford <mark@kyne.com.au>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* JSON uses a '.' decimal separator. strtod() / sprintf() under C libraries
* with locale support will break when the decimal separator is a comma.
*
* fpconv_* will work around these issues with a translation buffer if required.
*/
#include "c_stdio.h"
#include "c_stdlib.h"
// #include <assert.h>
#include "c_string.h"
#include "fpconv.h"
/* Lua CJSON assumes the locale is the same for all threads within a
* process and doesn't change after initialisation.
*
* This avoids the need for per thread storage or expensive checks
* per call. */
static char locale_decimal_point = '.';
/* In theory multibyte decimal_points are possible, but
* Lua CJSON only supports UTF-8 and known locales only have
* single byte decimal points ([.,]).
*
* localeconv() may not be thread safe (=>crash), and nl_langinfo() is
* not supported on some platforms. Use sprintf() instead - if the
* locale does change, at least Lua CJSON won't crash. */
static void fpconv_update_locale()
{
char buf[8];
c_sprintf(buf, "%g", 0.5);
/* Failing this test might imply the platform has a buggy dtoa
* implementation or wide characters */
if (buf[0] != '0' || buf[2] != '5' || buf[3] != 0) {
NODE_ERR("Error: wide characters found or printf() bug.");
return;
}
locale_decimal_point = buf[1];
}
/* Check for a valid number character: [-+0-9a-yA-Y.]
* Eg: -0.6e+5, infinity, 0xF0.F0pF0
*
* Used to find the probable end of a number. It doesn't matter if
* invalid characters are counted - strtod() will find the valid
* number if it exists. The risk is that slightly more memory might
* be allocated before a parse error occurs. */
static inline int valid_number_character(char ch)
{
char lower_ch;
if ('0' <= ch && ch <= '9')
return 1;
if (ch == '-' || ch == '+' || ch == '.')
return 1;
/* Hex digits, exponent (e), base (p), "infinity",.. */
lower_ch = ch | 0x20;
if ('a' <= lower_ch && lower_ch <= 'y')
return 1;
return 0;
}
/* Calculate the size of the buffer required for a strtod locale
* conversion. */
static int strtod_buffer_size(const char *s)
{
const char *p = s;
while (valid_number_character(*p))
p++;
return p - s;
}
/* Similar to strtod(), but must be passed the current locale's decimal point
* character. Guaranteed to be called at the start of any valid number in a string */
double fpconv_strtod(const char *nptr, char **endptr)
{
char localbuf[FPCONV_G_FMT_BUFSIZE];
char *buf, *endbuf, *dp;
int buflen;
double value;
/* System strtod() is fine when decimal point is '.' */
if (locale_decimal_point == '.')
return c_strtod(nptr, endptr);
buflen = strtod_buffer_size(nptr);
if (!buflen) {
/* No valid characters found, standard strtod() return */
*endptr = (char *)nptr;
return 0;
}
/* Duplicate number into buffer */
if (buflen >= FPCONV_G_FMT_BUFSIZE) {
/* Handle unusually large numbers */
buf = c_malloc(buflen + 1);
if (!buf) {
NODE_ERR("not enough memory\n");
return 0; /* fpconv_strtod() returns double; return a value on allocation failure */
}
} else {
/* This is the common case.. */
buf = localbuf;
}
c_memcpy(buf, nptr, buflen);
buf[buflen] = 0;
/* Update decimal point character if found */
dp = c_strchr(buf, '.');
if (dp)
*dp = locale_decimal_point;
value = c_strtod(buf, &endbuf);
*endptr = (char *)&nptr[endbuf - buf];
if (buflen >= FPCONV_G_FMT_BUFSIZE)
c_free(buf);
return value;
}
/* "fmt" must point to a buffer of at least 6 characters */
static void set_number_format(char *fmt, int precision)
{
int d1, d2, i;
if(!(1 <= precision && precision <= 14)) return;
/* Create printf format (%.14g) from precision */
d1 = precision / 10;
d2 = precision % 10;
fmt[0] = '%';
fmt[1] = '.';
i = 2;
if (d1) {
fmt[i++] = '0' + d1;
}
fmt[i++] = '0' + d2;
fmt[i++] = 'g';
fmt[i] = 0;
}
/* Assumes there are always at least 32 characters available in the target buffer */
int fpconv_g_fmt(char *str, double num, int precision)
{
char buf[FPCONV_G_FMT_BUFSIZE];
char fmt[6];
int len;
char *b;
set_number_format(fmt, precision);
/* Pass through when decimal point character is dot. */
if (locale_decimal_point == '.'){
c_sprintf(str, fmt, num);
return c_strlen(str);
}
/* snprintf() to a buffer then translate for other decimal point characters */
c_sprintf(buf, fmt, num);
len = c_strlen(buf);
/* Copy into target location. Translate decimal point if required */
b = buf;
do {
*str++ = (*b == locale_decimal_point ? '.' : *b);
} while(*b++);
return len;
}
void fpconv_init()
{
fpconv_update_locale();
}
/* vi:ai et sw=4 ts=4:
*/

app/cjson/fpconv.h Normal file

@ -0,0 +1,22 @@
/* Lua CJSON floating point conversion routines */
/* Buffer required to store the largest string representation of a double.
*
* Longest double printed with %.14g is 21 characters long:
* -1.7976931348623e+308 */
# define FPCONV_G_FMT_BUFSIZE 32
#ifdef USE_INTERNAL_FPCONV
static inline void fpconv_init()
{
/* Do nothing - not required */
}
#else
extern inline void fpconv_init();
#endif
extern int fpconv_g_fmt(char*, double, int);
extern double fpconv_strtod(const char*, char**);
/* vi:ai et sw=4 ts=4:
*/

app/cjson/g_fmt.c Normal file

@ -0,0 +1,112 @@
/****************************************************************
*
* The author of this software is David M. Gay.
*
* Copyright (c) 1991, 1996 by Lucent Technologies.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose without fee is hereby granted, provided that this entire notice
* is included in all copies of any software which is or includes a copy
* or modification of this software and in all copies of the supporting
* documentation for such software.
*
* THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
* REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
* OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
*
***************************************************************/
/* g_fmt(buf,x) stores the closest decimal approximation to x in buf;
* it suffices to declare buf
* char buf[32];
*/
#if 0
#ifdef __cplusplus
extern "C" {
#endif
extern char *dtoa(double, int, int, int *, int *, char **);
extern int g_fmt(char *, double, int);
extern void freedtoa(char*);
#ifdef __cplusplus
}
#endif
int
fpconv_g_fmt(char *b, double x, int precision)
{
register int i, k;
register char *s;
int decpt, j, sign;
char *b0, *s0, *se;
b0 = b;
#ifdef IGNORE_ZERO_SIGN
if (!x) {
*b++ = '0';
*b = 0;
goto done;
}
#endif
s = s0 = dtoa(x, 2, precision, &decpt, &sign, &se);
if (sign)
*b++ = '-';
if (decpt == 9999) /* Infinity or Nan */ {
while((*b++ = *s++));
/* "b" is used to calculate the return length. Decrement to exclude the
* Null terminator from the length */
b--;
goto done0;
}
if (decpt <= -4 || decpt > precision) {
*b++ = *s++;
if (*s) {
*b++ = '.';
while((*b = *s++))
b++;
}
*b++ = 'e';
/* sprintf(b, "%+.2d", decpt - 1); */
if (--decpt < 0) {
*b++ = '-';
decpt = -decpt;
}
else
*b++ = '+';
for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10);
for(;;) {
i = decpt / k;
*b++ = i + '0';
if (--j <= 0)
break;
decpt -= i*k;
decpt *= 10;
}
*b = 0;
}
else if (decpt <= 0) {
*b++ = '0';
*b++ = '.';
for(; decpt < 0; decpt++)
*b++ = '0';
while((*b++ = *s++));
b--;
}
else {
while((*b = *s++)) {
b++;
if (--decpt == 0 && *s)
*b++ = '.';
}
for(; decpt > 0; decpt--)
*b++ = '0';
*b = 0;
}
done0:
freedtoa(s0);
#ifdef IGNORE_ZERO_SIGN
done:
#endif
return b - b0;
}
#endif


@ -0,0 +1,271 @@
local json = require "cjson"
-- Various common routines used by the Lua CJSON package
--
-- Mark Pulford <mark@kyne.com.au>
-- Determine whether a Lua table can be treated as an array.
-- Explicitly returns "not an array" for very sparse arrays.
-- Returns:
-- -1 Not an array
-- 0 Empty table
-- >0 Highest index in the array
local function is_array(table)
local max = 0
local count = 0
for k, v in pairs(table) do
if type(k) == "number" then
if k > max then max = k end
count = count + 1
else
return -1
end
end
if max > count * 2 then
return -1
end
return max
end
local serialise_value
local function serialise_table(value, indent, depth)
local spacing, spacing2, indent2
if indent then
spacing = "\n" .. indent
spacing2 = spacing .. " "
indent2 = indent .. " "
else
spacing, spacing2, indent2 = " ", " ", false
end
depth = depth + 1
if depth > 50 then
return "Cannot serialise any further: too many nested tables"
end
local max = is_array(value)
local comma = false
local fragment = { "{" .. spacing2 }
if max > 0 then
-- Serialise array
for i = 1, max do
if comma then
table.insert(fragment, "," .. spacing2)
end
table.insert(fragment, serialise_value(value[i], indent2, depth))
comma = true
end
elseif max < 0 then
-- Serialise table
for k, v in pairs(value) do
if comma then
table.insert(fragment, "," .. spacing2)
end
table.insert(fragment,
("[%s] = %s"):format(serialise_value(k, indent2, depth),
serialise_value(v, indent2, depth)))
comma = true
end
end
table.insert(fragment, spacing .. "}")
return table.concat(fragment)
end
function serialise_value(value, indent, depth)
if indent == nil then indent = "" end
if depth == nil then depth = 0 end
if value == json.null then
return "json.null"
elseif type(value) == "string" then
return ("%q"):format(value)
elseif type(value) == "nil" or type(value) == "number" or
type(value) == "boolean" then
return tostring(value)
elseif type(value) == "table" then
return serialise_table(value, indent, depth)
else
return "\"<" .. type(value) .. ">\""
end
end
local function file_load(filename)
local file
if filename == nil then
file = io.stdin
else
local err
file, err = io.open(filename, "rb")
if file == nil then
error(("Unable to read '%s': %s"):format(filename, err))
end
end
local data = file:read("*a")
if filename ~= nil then
file:close()
end
if data == nil then
error("Failed to read " .. filename)
end
return data
end
local function file_save(filename, data)
local file
if filename == nil then
file = io.stdout
else
local err
file, err = io.open(filename, "wb")
if file == nil then
error(("Unable to write '%s': %s"):format(filename, err))
end
end
file:write(data)
if filename ~= nil then
file:close()
end
end
local function compare_values(val1, val2)
local type1 = type(val1)
local type2 = type(val2)
if type1 ~= type2 then
return false
end
-- Check for NaN
if type1 == "number" and val1 ~= val1 and val2 ~= val2 then
return true
end
if type1 ~= "table" then
return val1 == val2
end
-- check_keys stores all the keys that must be checked in val2
local check_keys = {}
for k, _ in pairs(val1) do
check_keys[k] = true
end
for k, v in pairs(val2) do
if not check_keys[k] then
return false
end
if not compare_values(val1[k], val2[k]) then
return false
end
check_keys[k] = nil
end
for k, _ in pairs(check_keys) do
-- Not the same if any keys from val1 were not found in val2
return false
end
return true
end
local test_count_pass = 0
local test_count_total = 0
local function run_test_summary()
return test_count_pass, test_count_total
end
local function run_test(testname, func, input, should_work, output)
local function status_line(name, status, value)
local statusmap = { [true] = ":success", [false] = ":error" }
if status ~= nil then
name = name .. statusmap[status]
end
print(("[%s] %s"):format(name, serialise_value(value, false)))
end
local result = { pcall(func, unpack(input)) }
local success = table.remove(result, 1)
local correct = false
if success == should_work and compare_values(result, output) then
correct = true
test_count_pass = test_count_pass + 1
end
test_count_total = test_count_total + 1
local teststatus = { [true] = "PASS", [false] = "FAIL" }
print(("==> Test [%d] %s: %s"):format(test_count_total, testname,
teststatus[correct]))
status_line("Input", nil, input)
if not correct then
status_line("Expected", should_work, output)
end
status_line("Received", success, result)
print()
return correct, result
end
local function run_test_group(tests)
local function run_helper(name, func, input)
if type(name) == "string" and #name > 0 then
print("==> " .. name)
end
-- Not a protected call, these functions should never generate errors.
func(unpack(input or {}))
print()
end
for _, v in ipairs(tests) do
-- Run the helper if "should_work" is missing
if v[4] == nil then
run_helper(unpack(v))
else
run_test(unpack(v))
end
end
end
-- Run a Lua script in a separate environment
local function run_script(script, env)
local env = env or {}
local func
-- Use setfenv() if it exists, otherwise assume Lua 5.2 load() exists
if _G.setfenv then
func = loadstring(script)
if func then
setfenv(func, env)
end
else
func = load(script, nil, nil, env)
end
if func == nil then
error("Invalid syntax.")
end
func()
return env
end
-- Export functions
return {
serialise_value = serialise_value,
file_load = file_load,
file_save = file_save,
compare_values = compare_values,
run_test_summary = run_test_summary,
run_test = run_test,
run_test_group = run_test_group,
run_script = run_script
}
-- vi:ai et sw=4 ts=4:


@ -0,0 +1,14 @@
#!/usr/bin/env lua
-- usage: json2lua.lua [json_file]
--
-- Eg:
-- echo '[ "testing" ]' | ./json2lua.lua
-- ./json2lua.lua test.json
local json = require "cjson"
local util = require "cjson.util"
local json_text = util.file_load(arg[1])
local t = json.decode(json_text)
print(util.serialise_value(t))


@ -0,0 +1,20 @@
#!/usr/bin/env lua
-- usage: lua2json.lua [lua_file]
--
-- Eg:
-- echo '{ "testing" }' | ./lua2json.lua
-- ./lua2json.lua test.lua
local json = require "cjson"
local util = require "cjson.util"
local env = {
json = { null = json.null },
null = json.null
}
local t = util.run_script("data = " .. util.file_load(arg[1]), env)
print(json.encode(t.data))
-- vi:ai et sw=4 ts=4:

app/cjson/manual.txt Normal file

@ -0,0 +1,168 @@
= Lua CJSON 2.1devel Manual =
Mark Pulford <mark@kyne.com.au>
:revdate: 1st March 2012
Overview
--------
The Lua CJSON module provides JSON support for Lua.
*Features*::
- Fast, standards compliant encoding/parsing routines
- Full support for JSON with UTF-8, including decoding surrogate pairs
- Optional run-time support for common exceptions to the JSON
specification (infinity, NaN,..)
- No dependencies on other libraries
*Caveats*::
- UTF-16 and UTF-32 are not supported
Lua CJSON is covered by the MIT license. Review the file +LICENSE+ for
details.
API (Functions)
---------------
Synopsis
~~~~~~~~
[source,lua]
------------
-- Translate Lua value to/from JSON
text = cjson.encode(value)
value = cjson.decode(text)
------------
Module Instantiation
~~~~~~~~~~~~~~~~~~~~
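A minimal sketch of loading the module (assuming a standard Lua
environment; the NodeMCU firmware links the module in at build time):

[source,lua]
------------
local cjson = require "cjson"
------------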
decode
~~~~~~
[source,lua]
------------
value = cjson.decode(json_text)
------------
+cjson.decode+ will deserialise any UTF-8 JSON string into a Lua value
or table.
UTF-16 and UTF-32 JSON strings are not supported.
+cjson.decode+ requires that any NULL (ASCII 0) and double quote (ASCII
34) characters are escaped within strings. All escape codes will be
decoded and other bytes will be passed transparently. UTF-8 characters
are not validated during decoding and should be checked elsewhere if
required.
JSON +null+ will be converted to a NULL +lightuserdata+ value. This can
be compared with +cjson.null+ for convenience.
By default, numbers incompatible with the JSON specification (infinity,
NaN, hexadecimal) can be decoded. This default can be changed with
<<decode_invalid_numbers,+cjson.decode_invalid_numbers+>>.
.Example: Decoding
[source,lua]
json_text = '[ true, { "foo": "bar" } ]'
value = cjson.decode(json_text)
-- Returns: { true, { foo = "bar" } }
[CAUTION]
Care must be taken after decoding JSON objects with numeric keys. Each
numeric key will be stored as a Lua +string+. Any subsequent code
assuming type +number+ may break.
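A short sketch of this behaviour:

[source,lua]
t = cjson.decode('{ "2": "two" }')
print(t["2"])   -- "two"  (the key is a Lua string)
print(t[2])     -- nil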
[[encode]]
encode
~~~~~~
[source,lua]
------------
json_text = cjson.encode(value)
------------
+cjson.encode+ will serialise a Lua value into a string containing the
JSON representation.
+cjson.encode+ supports the following types:
- +boolean+
- +lightuserdata+ (NULL value only)
- +nil+
- +number+
- +string+
- +table+
The remaining Lua types will generate an error:
- +function+
- +lightuserdata+ (non-NULL values)
- +thread+
- +userdata+
By default, numbers are encoded with 14 significant digits. Refer to
<<encode_number_precision,+cjson.encode_number_precision+>> for details.
Lua CJSON will escape the following characters within each UTF-8 string:
- Control characters (ASCII 0 - 31)
- Double quote (ASCII 34)
- Forward slash (ASCII 47)
- Backslash (ASCII 92)
- Delete (ASCII 127)
All other bytes are passed transparently.
[CAUTION]
=========
Lua CJSON will successfully encode/decode binary strings, but this is
technically not supported by JSON and may not be compatible with other
JSON libraries. To ensure the output is valid JSON, applications should
ensure all Lua strings passed to +cjson.encode+ are UTF-8.
Base64 is commonly used to encode binary data as the most efficient
encoding under UTF-8 can only reduce the encoded size by a further
&#126;8%. Lua Base64 routines can be found in the
http://w3.impa.br/%7Ediego/software/luasocket/[LuaSocket] and
http://www.tecgraf.puc-rio.br/%7Elhf/ftp/lua/#lbase64[lbase64] packages.
=========
Lua CJSON uses a heuristic to determine whether to encode a Lua table as
a JSON array or an object. A Lua table with only positive integer keys
of type +number+ will be encoded as a JSON array. All other tables will
be encoded as a JSON object.
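A brief sketch of the heuristic (object key order is undefined):

[source,lua]
print(cjson.encode({ 10, 20, 30 }))            -- [10,20,30]
print(cjson.encode({ pin = 4, mode = "out" })) -- {"pin":4,"mode":"out"}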
Lua CJSON does not use metamethods when serialising tables.
- +rawget+ is used to iterate over Lua arrays
- +next+ is used to iterate over Lua objects
Lua arrays with missing entries (_sparse arrays_) may optionally be
encoded in several different ways. Refer to
<<encode_sparse_array,+cjson.encode_sparse_array+>> for details.
JSON object keys are always strings. Hence +cjson.encode+ only supports
table keys which are type +number+ or +string+. All other types will
generate an error.
[NOTE]
Standards compliant JSON must be encapsulated in either an object (+{}+)
or an array (+[]+). If strictly standards compliant JSON is desired, a
table must be passed to +cjson.encode+.
By default, encoding the following Lua values will generate errors:
- Numbers incompatible with the JSON specification (infinity, NaN)
- Tables nested more than 1000 levels deep
- Excessively sparse Lua arrays
.Example: Encoding
[source,lua]
value = { true, { foo = "bar" } }
json_text = cjson.encode(value)
-- Returns: '[true,{"foo":"bar"}]'
// vi:ft=asciidoc tw=72:

app/cjson/rfc4627.txt Normal file

@ -0,0 +1,563 @@
Network Working Group D. Crockford
Request for Comments: 4627 JSON.org
Category: Informational July 2006
The application/json Media Type for JavaScript Object Notation (JSON)
Status of This Memo
This memo provides information for the Internet community. It does
not specify an Internet standard of any kind. Distribution of this
memo is unlimited.
Copyright Notice
Copyright (C) The Internet Society (2006).
Abstract
JavaScript Object Notation (JSON) is a lightweight, text-based,
language-independent data interchange format. It was derived from
the ECMAScript Programming Language Standard. JSON defines a small
set of formatting rules for the portable representation of structured
data.
1. Introduction
JavaScript Object Notation (JSON) is a text format for the
serialization of structured data. It is derived from the object
literals of JavaScript, as defined in the ECMAScript Programming
Language Standard, Third Edition [ECMA].
JSON can represent four primitive types (strings, numbers, booleans,
and null) and two structured types (objects and arrays).
A string is a sequence of zero or more Unicode characters [UNICODE].
An object is an unordered collection of zero or more name/value
pairs, where a name is a string and a value is a string, number,
boolean, null, object, or array.
An array is an ordered sequence of zero or more values.
The terms "object" and "array" come from the conventions of
JavaScript.
JSON's design goals were for it to be minimal, portable, textual, and
a subset of JavaScript.
1.1. Conventions Used in This Document
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
document are to be interpreted as described in [RFC2119].
The grammatical rules in this document are to be interpreted as
described in [RFC4234].
2. JSON Grammar
A JSON text is a sequence of tokens. The set of tokens includes six
structural characters, strings, numbers, and three literal names.
A JSON text is a serialized object or array.
JSON-text = object / array
These are the six structural characters:
begin-array = ws %x5B ws ; [ left square bracket
begin-object = ws %x7B ws ; { left curly bracket
end-array = ws %x5D ws ; ] right square bracket
end-object = ws %x7D ws ; } right curly bracket
name-separator = ws %x3A ws ; : colon
value-separator = ws %x2C ws ; , comma
Insignificant whitespace is allowed before or after any of the six
structural characters.
ws = *(
%x20 / ; Space
%x09 / ; Horizontal tab
%x0A / ; Line feed or New line
%x0D ; Carriage return
)
2.1. Values
A JSON value MUST be an object, array, number, or string, or one of
the following three literal names:
false null true
The literal names MUST be lowercase. No other literal names are
allowed.
value = false / null / true / object / array / number / string
false = %x66.61.6c.73.65 ; false
null = %x6e.75.6c.6c ; null
true = %x74.72.75.65 ; true
2.2. Objects
An object structure is represented as a pair of curly brackets
surrounding zero or more name/value pairs (or members). A name is a
string. A single colon comes after each name, separating the name
from the value. A single comma separates a value from a following
name. The names within an object SHOULD be unique.
object = begin-object [ member *( value-separator member ) ]
end-object
member = string name-separator value
2.3. Arrays
An array structure is represented as square brackets surrounding zero
or more values (or elements). Elements are separated by commas.
array = begin-array [ value *( value-separator value ) ] end-array
2.4. Numbers
The representation of numbers is similar to that used in most
programming languages. A number contains an integer component that
may be prefixed with an optional minus sign, which may be followed by
a fraction part and/or an exponent part.
Octal and hex forms are not allowed. Leading zeros are not allowed.
A fraction part is a decimal point followed by one or more digits.
An exponent part begins with the letter E in upper or lowercase,
which may be followed by a plus or minus sign. The E and optional
sign are followed by one or more digits.
Numeric values that cannot be represented as sequences of digits
(such as Infinity and NaN) are not permitted.
number = [ minus ] int [ frac ] [ exp ]
decimal-point = %x2E ; .
digit1-9 = %x31-39 ; 1-9
e = %x65 / %x45 ; e E
exp = e [ minus / plus ] 1*DIGIT
frac = decimal-point 1*DIGIT
int = zero / ( digit1-9 *DIGIT )
minus = %x2D ; -
plus = %x2B ; +
zero = %x30 ; 0
2.5. Strings
The representation of strings is similar to conventions used in the C
family of programming languages. A string begins and ends with
quotation marks. All Unicode characters may be placed within the
quotation marks except for the characters that must be escaped:
quotation mark, reverse solidus, and the control characters (U+0000
through U+001F).
Any character may be escaped. If the character is in the Basic
Multilingual Plane (U+0000 through U+FFFF), then it may be
represented as a six-character sequence: a reverse solidus, followed
by the lowercase letter u, followed by four hexadecimal digits that
encode the character's code point. The hexadecimal letters A though
F can be upper or lowercase. So, for example, a string containing
only a single reverse solidus character may be represented as
"\u005C".
Alternatively, there are two-character sequence escape
representations of some popular characters. So, for example, a
string containing only a single reverse solidus character may be
represented more compactly as "\\".
To escape an extended character that is not in the Basic Multilingual
Plane, the character is represented as a twelve-character sequence,
encoding the UTF-16 surrogate pair. So, for example, a string
containing only the G clef character (U+1D11E) may be represented as
"\uD834\uDD1E".
string = quotation-mark *char quotation-mark
char = unescaped /
escape (
%x22 / ; " quotation mark U+0022
%x5C / ; \ reverse solidus U+005C
%x2F / ; / solidus U+002F
%x62 / ; b backspace U+0008
%x66 / ; f form feed U+000C
%x6E / ; n line feed U+000A
%x72 / ; r carriage return U+000D
%x74 / ; t tab U+0009
%x75 4HEXDIG ) ; uXXXX U+XXXX
escape = %x5C ; \
quotation-mark = %x22 ; "
unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
3. Encoding
JSON text SHALL be encoded in Unicode. The default encoding is
UTF-8.
Since the first two characters of a JSON text will always be ASCII
characters [RFC0020], it is possible to determine whether an octet
stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking
at the pattern of nulls in the first four octets.
00 00 00 xx UTF-32BE
00 xx 00 xx UTF-16BE
xx 00 00 00 UTF-32LE
xx 00 xx 00 UTF-16LE
xx xx xx xx UTF-8
4. Parsers
A JSON parser transforms a JSON text into another representation. A
JSON parser MUST accept all texts that conform to the JSON grammar.
A JSON parser MAY accept non-JSON forms or extensions.
An implementation may set limits on the size of texts that it
accepts. An implementation may set limits on the maximum depth of
nesting. An implementation may set limits on the range of numbers.
An implementation may set limits on the length and character contents
of strings.
5. Generators
A JSON generator produces JSON text. The resulting text MUST
strictly conform to the JSON grammar.
6. IANA Considerations
The MIME media type for JSON text is application/json.
Type name: application
Subtype name: json
Required parameters: n/a
Optional parameters: n/a
Encoding considerations: 8bit if UTF-8; binary if UTF-16 or UTF-32
JSON may be represented using UTF-8, UTF-16, or UTF-32. When JSON
is written in UTF-8, JSON is 8bit compatible. When JSON is
written in UTF-16 or UTF-32, the binary content-transfer-encoding
must be used.
Security considerations:
Generally there are security issues with scripting languages. JSON
is a subset of JavaScript, but it is a safe subset that excludes
assignment and invocation.
A JSON text can be safely passed into JavaScript's eval() function
(which compiles and executes a string) if all the characters not
enclosed in strings are in the set of characters that form JSON
tokens. This can be quickly determined in JavaScript with two
regular expressions and calls to the test and replace methods.
var my_JSON_object = !(/[^,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]/.test(
text.replace(/"(\\.|[^"\\])*"/g, ''))) &&
eval('(' + text + ')');
Interoperability considerations: n/a
Published specification: RFC 4627
Applications that use this media type:
JSON has been used to exchange data between applications written
in all of these programming languages: ActionScript, C, C#,
ColdFusion, Common Lisp, E, Erlang, Java, JavaScript, Lua,
Objective CAML, Perl, PHP, Python, Rebol, Ruby, and Scheme.
Additional information:
Magic number(s): n/a
File extension(s): .json
Macintosh file type code(s): TEXT
Person & email address to contact for further information:
Douglas Crockford
douglas@crockford.com
Intended usage: COMMON
Restrictions on usage: none
Author:
Douglas Crockford
douglas@crockford.com
Change controller:
Douglas Crockford
douglas@crockford.com
7. Security Considerations
See Security Considerations in Section 6.
8. Examples
This is a JSON object:
{
"Image": {
"Width": 800,
"Height": 600,
"Title": "View from 15th Floor",
"Thumbnail": {
"Url": "http://www.example.com/image/481989943",
"Height": 125,
"Width": "100"
},
"IDs": [116, 943, 234, 38793]
}
}
Its Image member is an object whose Thumbnail member is an object
and whose IDs member is an array of numbers.
This is a JSON array containing two objects:
[
{
"precision": "zip",
"Latitude": 37.7668,
"Longitude": -122.3959,
"Address": "",
"City": "SAN FRANCISCO",
"State": "CA",
"Zip": "94107",
"Country": "US"
},
{
"precision": "zip",
"Latitude": 37.371991,
"Longitude": -122.026020,
"Address": "",
"City": "SUNNYVALE",
"State": "CA",
"Zip": "94085",
"Country": "US"
}
]
9. References
9.1. Normative References
[ECMA] European Computer Manufacturers Association, "ECMAScript
Language Specification 3rd Edition", December 1999,
<http://www.ecma-international.org/publications/files/
ecma-st/ECMA-262.pdf>.
[RFC0020] Cerf, V., "ASCII format for network interchange", RFC 20,
October 1969.
[RFC2119] Bradner, S., "Key words for use in RFCs to Indicate
Requirement Levels", BCP 14, RFC 2119, March 1997.
[RFC4234] Crocker, D. and P. Overell, "Augmented BNF for Syntax
Specifications: ABNF", RFC 4234, October 2005.
[UNICODE] The Unicode Consortium, "The Unicode Standard Version 4.0",
2003, <http://www.unicode.org/versions/Unicode4.1.0/>.
Author's Address
Douglas Crockford
JSON.org
EMail: douglas@crockford.com
Full Copyright Statement
Copyright (C) The Internet Society (2006).
This document is subject to the rights, licenses and restrictions
contained in BCP 78, and except as set forth therein, the authors
retain all their rights.
This document and the information contained herein are provided on an
"AS IS" basis and THE CONTRIBUTOR, THE ORGANIZATION HE/SHE REPRESENTS
OR IS SPONSORED BY (IF ANY), THE INTERNET SOCIETY AND THE INTERNET
ENGINEERING TASK FORCE DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE
INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
Intellectual Property
The IETF takes no position regarding the validity or scope of any
Intellectual Property Rights or other rights that might be claimed to
pertain to the implementation or use of the technology described in
this document or the extent to which any license under such rights
might or might not be available; nor does it represent that it has
made any independent effort to identify any such rights. Information
on the procedures with respect to rights in RFC documents can be
found in BCP 78 and BCP 79.
Copies of IPR disclosures made to the IETF Secretariat and any
assurances of licenses to be made available, or the result of an
attempt made to obtain a general license or permission for the use of
such proprietary rights by implementers or users of this
specification can be obtained from the IETF on-line IPR repository at
http://www.ietf.org/ipr.
The IETF invites any interested party to bring to its attention any
copyrights, patents or patent applications, or other proprietary
rights that may cover technology that may be required to implement
this standard. Please address the information to the IETF at
ietf-ipr@ietf.org.
Acknowledgement
Funding for the RFC Editor function is provided by the IETF
Administrative Support Activity (IASA).

app/cjson/strbuf.c Normal file

@ -0,0 +1,252 @@
/* strbuf - String buffer routines
*
* Copyright (c) 2010-2012 Mark Pulford <mark@kyne.com.au>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "c_stdio.h"
#include "c_stdlib.h"
#include "c_stdarg.h"
#include "c_string.h"
#include "strbuf.h"
int strbuf_init(strbuf_t *s, int len)
{
int size;
if (len <= 0)
size = STRBUF_DEFAULT_SIZE;
else
size = len + 1; /* \0 terminator */
s->buf = NULL;
s->size = size;
s->length = 0;
s->increment = STRBUF_DEFAULT_INCREMENT;
s->dynamic = 0;
s->reallocs = 0;
s->debug = 0;
s->buf = c_malloc(size);
if (!s->buf){
NODE_ERR("not enough memory\n");
return -1;
}
strbuf_ensure_null(s);
return 0;
}
strbuf_t *strbuf_new(int len)
{
strbuf_t *s;
s = c_malloc(sizeof(strbuf_t));
if (!s){
NODE_ERR("not enough memory\n");
return NULL;
}
strbuf_init(s, len);
/* Dynamic strbuf allocation / deallocation */
s->dynamic = 1;
return s;
}
int strbuf_set_increment(strbuf_t *s, int increment)
{
/* Increment > 0: Linear buffer growth rate
* Increment < -1: Exponential buffer growth rate */
if (increment == 0 || increment == -1){
NODE_ERR("BUG: Invalid string increment");
return -1;
}
s->increment = increment;
return 0;
}
static inline void debug_stats(strbuf_t *s)
{
if (s->debug) {
NODE_ERR("strbuf(%lx) reallocs: %d, length: %d, size: %d\n",
(long)s, s->reallocs, s->length, s->size);
}
}
/* If strbuf_t has not been dynamically allocated, strbuf_free() can
* be called any number of times after strbuf_init(). */
void strbuf_free(strbuf_t *s)
{
debug_stats(s);
if (s->buf) {
c_free(s->buf);
s->buf = NULL;
}
if (s->dynamic)
c_free(s);
}
char *strbuf_free_to_string(strbuf_t *s, int *len)
{
char *buf;
debug_stats(s);
strbuf_ensure_null(s);
buf = s->buf;
if (len)
*len = s->length;
if (s->dynamic)
c_free(s);
return buf;
}
static int calculate_new_size(strbuf_t *s, int len)
{
int reqsize, newsize;
if (len <= 0){
NODE_ERR("BUG: Invalid strbuf length requested");
return 0;
}
/* Ensure there is room for optional NULL termination */
reqsize = len + 1;
/* If the user has requested to shrink the buffer, do it exactly */
if (s->size > reqsize)
return reqsize;
newsize = s->size;
if (s->increment < 0) {
/* Exponential sizing */
while (newsize < reqsize)
newsize *= -s->increment;
} else {
/* Linear sizing */
newsize = ((newsize + s->increment - 1) / s->increment) * s->increment;
}
return newsize;
}
/* Ensure strbuf can handle a string len bytes long (ignoring optional
* NULL termination). */
int strbuf_resize(strbuf_t *s, int len)
{
int newsize;
newsize = calculate_new_size(s, len);
if (s->debug > 1) {
NODE_ERR("strbuf(%lx) resize: %d => %d\n",
(long)s, s->size, newsize);
}
s->buf = (char *)c_realloc(s->buf, newsize);
if (!s->buf){
NODE_ERR("not enough memory");
return -1;
}
s->size = newsize;
s->reallocs++;
return 0;
}
void strbuf_append_string(strbuf_t *s, const char *str)
{
int space, i;
space = strbuf_empty_length(s);
for (i = 0; str[i]; i++) {
if (space < 1) {
strbuf_resize(s, s->length + 1);
space = strbuf_empty_length(s);
}
s->buf[s->length] = str[i];
s->length++;
space--;
}
}
#if 0
/* strbuf_append_fmt() should only be used when an upper bound
* is known for the output string. */
void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...)
{
va_list arg;
int fmt_len;
strbuf_ensure_empty_length(s, len);
va_start(arg, fmt);
fmt_len = vsnprintf(s->buf + s->length, len, fmt, arg);
va_end(arg);
if (fmt_len < 0)
die("BUG: Unable to convert number"); /* This should never happen.. */
s->length += fmt_len;
}
/* strbuf_append_fmt_retry() can be used when there is no known
* upper bound for the output string. */
void strbuf_append_fmt_retry(strbuf_t *s, const char *fmt, ...)
{
va_list arg;
int fmt_len, try;
int empty_len;
/* If the first attempt to append fails, resize the buffer appropriately
* and try again */
for (try = 0; ; try++) {
va_start(arg, fmt);
/* Append the new formatted string */
/* fmt_len is the length of the string required, excluding the
* trailing NULL */
empty_len = strbuf_empty_length(s);
/* Add 1 since there is also space to store the terminating NULL. */
fmt_len = vsnprintf(s->buf + s->length, empty_len + 1, fmt, arg);
va_end(arg);
if (fmt_len <= empty_len)
break; /* SUCCESS */
if (try > 0)
die("BUG: length of formatted string changed");
strbuf_resize(s, s->length + fmt_len);
}
s->length += fmt_len;
}
#endif
/* vi:ai et sw=4 ts=4:
*/

app/cjson/strbuf.h Normal file

@ -0,0 +1,154 @@
/* strbuf - String buffer routines
*
* Copyright (c) 2010-2012 Mark Pulford <mark@kyne.com.au>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "c_stdlib.h"
#include "c_stdarg.h"
/* Size: Total bytes allocated to *buf
* Length: String length, excluding optional NULL terminator.
* Increment: Allocation increments when resizing the string buffer.
* Dynamic: True if created via strbuf_new()
*/
typedef struct {
char *buf;
int size;
int length;
int increment;
int dynamic;
int reallocs;
int debug;
} strbuf_t;
#ifndef STRBUF_DEFAULT_SIZE
#define STRBUF_DEFAULT_SIZE 1023
#endif
#ifndef STRBUF_DEFAULT_INCREMENT
#define STRBUF_DEFAULT_INCREMENT -2
#endif
/* Initialise */
extern strbuf_t *strbuf_new(int len);
extern int strbuf_init(strbuf_t *s, int len);
extern int strbuf_set_increment(strbuf_t *s, int increment);
/* Release */
extern void strbuf_free(strbuf_t *s);
extern char *strbuf_free_to_string(strbuf_t *s, int *len);
/* Management */
extern int strbuf_resize(strbuf_t *s, int len);
static int strbuf_empty_length(strbuf_t *s);
static int strbuf_length(strbuf_t *s);
static char *strbuf_string(strbuf_t *s, int *len);
static void strbuf_ensure_empty_length(strbuf_t *s, int len);
static char *strbuf_empty_ptr(strbuf_t *s);
static void strbuf_extend_length(strbuf_t *s, int len);
/* Update */
extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...);
extern void strbuf_append_fmt_retry(strbuf_t *s, const char *format, ...);
static void strbuf_append_mem(strbuf_t *s, const char *c, int len);
extern void strbuf_append_string(strbuf_t *s, const char *str);
static void strbuf_append_char(strbuf_t *s, const char c);
static void strbuf_ensure_null(strbuf_t *s);
/* Reset string for reuse */
static inline void strbuf_reset(strbuf_t *s)
{
s->length = 0;
}
static inline int strbuf_allocated(strbuf_t *s)
{
return s->buf != NULL;
}
/* Return bytes remaining in the string buffer
* Ensure there is space for a NULL terminator. */
static inline int strbuf_empty_length(strbuf_t *s)
{
return s->size - s->length - 1;
}
static inline void strbuf_ensure_empty_length(strbuf_t *s, int len)
{
if (len > strbuf_empty_length(s))
strbuf_resize(s, s->length + len);
}
static inline char *strbuf_empty_ptr(strbuf_t *s)
{
return s->buf + s->length;
}
static inline void strbuf_extend_length(strbuf_t *s, int len)
{
s->length += len;
}
static inline int strbuf_length(strbuf_t *s)
{
return s->length;
}
static inline void strbuf_append_char(strbuf_t *s, const char c)
{
strbuf_ensure_empty_length(s, 1);
s->buf[s->length++] = c;
}
static inline void strbuf_append_char_unsafe(strbuf_t *s, const char c)
{
s->buf[s->length++] = c;
}
static inline void strbuf_append_mem(strbuf_t *s, const char *c, int len)
{
strbuf_ensure_empty_length(s, len);
c_memcpy(s->buf + s->length, c, len);
s->length += len;
}
static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, int len)
{
c_memcpy(s->buf + s->length, c, len);
s->length += len;
}
static inline void strbuf_ensure_null(strbuf_t *s)
{
s->buf[s->length] = 0;
}
static inline char *strbuf_string(strbuf_t *s, int *len)
{
if (len)
*len = s->length;
return s->buf;
}
/* vi:ai et sw=4 ts=4:
*/

app/cjson/tests/README Normal file

@ -0,0 +1,4 @@
These JSON examples were taken from the JSON website
(http://json.org/example.html) and RFC 4627.
Used with permission.

app/cjson/tests/bench.lua Normal file

@ -0,0 +1,131 @@
#!/usr/bin/env lua
-- This benchmark script measures wall clock time and should be
-- run on an unloaded system.
--
-- Your Mileage May Vary.
--
-- Mark Pulford <mark@kyne.com.au>
local json_module = os.getenv("JSON_MODULE") or "cjson"
require "socket"
local json = require(json_module)
local util = require "cjson.util"
local function find_func(mod, funcnames)
for _, v in ipairs(funcnames) do
if mod[v] then
return mod[v]
end
end
return nil
end
local json_encode = find_func(json, { "encode", "Encode", "to_string", "stringify", "json" })
local json_decode = find_func(json, { "decode", "Decode", "to_value", "parse" })
local function average(t)
local total = 0
for _, v in ipairs(t) do
total = total + v
end
return total / #t
end
function benchmark(tests, seconds, rep)
local function bench(func, iter)
-- Use socket.gettime() to measure microsecond resolution
-- wall clock time.
local t = socket.gettime()
for i = 1, iter do
func(i)
end
t = socket.gettime() - t
-- Don't trust any results when the run lasted for less than a
-- millisecond - return nil.
if t < 0.001 then
return nil
end
return (iter / t)
end
-- Roughly calculate the number of iterations required
-- to obtain a particular time period.
local function calc_iter(func, seconds)
local iter = 1
local rate
-- Warm up the bench function first.
func()
while not rate do
rate = bench(func, iter)
iter = iter * 10
end
return math.ceil(seconds * rate)
end
local test_results = {}
for name, func in pairs(tests) do
-- k(number), v(string)
-- k(string), v(function)
-- k(number), v(function)
if type(func) == "string" then
name = func
func = _G[name]
end
local iter = calc_iter(func, seconds)
local result = {}
for i = 1, rep do
result[i] = bench(func, iter)
end
-- Remove the slowest half (round down) of the result set
table.sort(result)
for i = 1, math.floor(#result / 2) do
table.remove(result, 1)
end
test_results[name] = average(result)
end
return test_results
end
function bench_file(filename)
local data_json = util.file_load(filename)
local data_obj = json_decode(data_json)
local function test_encode()
json_encode(data_obj)
end
local function test_decode()
json_decode(data_json)
end
local tests = {}
if json_encode then tests.encode = test_encode end
if json_decode then tests.decode = test_decode end
return benchmark(tests, 0.1, 5)
end
-- Optionally load any custom configuration required for this module
local success, data = pcall(util.file_load, ("bench-%s.lua"):format(json_module))
if success then
util.run_script(data, _G)
configure(json)
end
for i = 1, #arg do
local results = bench_file(arg[i])
for k, v in pairs(results) do
print(("%s\t%s\t%d"):format(arg[i], k, v))
end
end
-- vi:ai et sw=4 ts=4:


@ -0,0 +1,22 @@
{
"glossary": {
"title": "example glossary",
"GlossDiv": {
"title": "S",
"GlossList": {
"GlossEntry": {
"ID": "SGML",
"SortAs": "SGML",
"GlossTerm": "Standard Generalized Mark up Language",
"Acronym": "SGML",
"Abbrev": "ISO 8879:1986",
"GlossDef": {
"para": "A meta-markup language, used to create markup languages such as DocBook.",
"GlossSeeAlso": ["GML", "XML"]
},
"GlossSee": "markup"
}
}
}
}
}


@ -0,0 +1,11 @@
{"menu": {
"id": "file",
"value": "File",
"popup": {
"menuitem": [
{"value": "New", "onclick": "CreateNewDoc()"},
{"value": "Open", "onclick": "OpenDoc()"},
{"value": "Close", "onclick": "CloseDoc()"}
]
}
}}


@ -0,0 +1,26 @@
{"widget": {
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"name": "sun1",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"name": "text1",
"hOffset": 250,
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}}


@ -0,0 +1,88 @@
{"web-app": {
"servlet": [
{
"servlet-name": "cofaxCDS",
"servlet-class": "org.cofax.cds.CDSServlet",
"init-param": {
"configGlossary:installationAt": "Philadelphia, PA",
"configGlossary:adminEmail": "ksm@pobox.com",
"configGlossary:poweredBy": "Cofax",
"configGlossary:poweredByIcon": "/images/cofax.gif",
"configGlossary:staticPath": "/content/static",
"templateProcessorClass": "org.cofax.WysiwygTemplate",
"templateLoaderClass": "org.cofax.FilesTemplateLoader",
"templatePath": "templates",
"templateOverridePath": "",
"defaultListTemplate": "listTemplate.htm",
"defaultFileTemplate": "articleTemplate.htm",
"useJSP": false,
"jspListTemplate": "listTemplate.jsp",
"jspFileTemplate": "articleTemplate.jsp",
"cachePackageTagsTrack": 200,
"cachePackageTagsStore": 200,
"cachePackageTagsRefresh": 60,
"cacheTemplatesTrack": 100,
"cacheTemplatesStore": 50,
"cacheTemplatesRefresh": 15,
"cachePagesTrack": 200,
"cachePagesStore": 100,
"cachePagesRefresh": 10,
"cachePagesDirtyRead": 10,
"searchEngineListTemplate": "forSearchEnginesList.htm",
"searchEngineFileTemplate": "forSearchEngines.htm",
"searchEngineRobotsDb": "WEB-INF/robots.db",
"useDataStore": true,
"dataStoreClass": "org.cofax.SqlDataStore",
"redirectionClass": "org.cofax.SqlRedirection",
"dataStoreName": "cofax",
"dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver",
"dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon",
"dataStoreUser": "sa",
"dataStorePassword": "dataStoreTestQuery",
"dataStoreTestQuery": "SET NOCOUNT ON;select test='test';",
"dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log",
"dataStoreInitConns": 10,
"dataStoreMaxConns": 100,
"dataStoreConnUsageLimit": 100,
"dataStoreLogLevel": "debug",
"maxUrlLength": 500}},
{
"servlet-name": "cofaxEmail",
"servlet-class": "org.cofax.cds.EmailServlet",
"init-param": {
"mailHost": "mail1",
"mailHostOverride": "mail2"}},
{
"servlet-name": "cofaxAdmin",
"servlet-class": "org.cofax.cds.AdminServlet"},
{
"servlet-name": "fileServlet",
"servlet-class": "org.cofax.cds.FileServlet"},
{
"servlet-name": "cofaxTools",
"servlet-class": "org.cofax.cms.CofaxToolsServlet",
"init-param": {
"templatePath": "toolstemplates/",
"log": 1,
"logLocation": "/usr/local/tomcat/logs/CofaxTools.log",
"logMaxSize": "",
"dataLog": 1,
"dataLogLocation": "/usr/local/tomcat/logs/dataLog.log",
"dataLogMaxSize": "",
"removePageCache": "/content/admin/remove?cache=pages&id=",
"removeTemplateCache": "/content/admin/remove?cache=templates&id=",
"fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder",
"lookInContext": 1,
"adminGroupID": 4,
"betaServer": true}}],
"servlet-mapping": {
"cofaxCDS": "/",
"cofaxEmail": "/cofaxutil/aemail/*",
"cofaxAdmin": "/admin/*",
"fileServlet": "/static/*",
"cofaxTools": "/tools/*"},
"taglib": {
"taglib-uri": "cofax.tld",
"taglib-location": "/WEB-INF/tlds/cofax.tld"}}}

View File

@@ -0,0 +1,27 @@
{"menu": {
"header": "SVG Viewer",
"items": [
{"id": "Open"},
{"id": "OpenNew", "label": "Open New"},
null,
{"id": "ZoomIn", "label": "Zoom In"},
{"id": "ZoomOut", "label": "Zoom Out"},
{"id": "OriginalView", "label": "Original View"},
null,
{"id": "Quality"},
{"id": "Pause"},
{"id": "Mute"},
null,
{"id": "Find", "label": "Find..."},
{"id": "FindAgain", "label": "Find Again"},
{"id": "Copy"},
{"id": "CopyAgain", "label": "Copy Again"},
{"id": "CopySVG", "label": "Copy SVG"},
{"id": "ViewSVG", "label": "View SVG"},
{"id": "ViewSource", "label": "View Source"},
{"id": "SaveAs", "label": "Save As"},
null,
{"id": "Help"},
{"id": "About", "label": "About Adobe CVG Viewer..."}
]
}}

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env perl
# Create test comparison data using a different UTF-8 implementation.
# The generated utf8.dat file must have the following MD5 sum:
# cff03b039d850f370a7362f3313e5268
use strict;
# 0xD800 - 0xDFFF are used to encode supplementary codepoints
# 0x10000 - 0x10FFFF are supplementary codepoints
my (@codepoints) = (0 .. 0xD7FF, 0xE000 .. 0x10FFFF);
my $utf8 = pack("U*", @codepoints);
defined($utf8) or die "Unable to create UTF-8 string\n";
open(FH, ">:utf8", "utf8.dat")
or die "Unable to open utf8.dat: $!\n";
print FH $utf8
or die "Unable to write utf8.dat\n";
close(FH);
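# A quick sanity check after running this script (assuming the md5sum utility
# is available):
#   md5sum utf8.dat   # should report cff03b039d850f370a7362f3313e5268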
# vi:ai et sw=4 ts=4:

View File

@@ -0,0 +1,7 @@
[ 0.110001,
0.12345678910111,
0.412454033640,
2.6651441426902,
2.718281828459,
3.1415926535898,
2.1406926327793 ]

View File

@@ -0,0 +1 @@
"\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !\"#$%&'()*+,-.\/0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f€亗儎厗噲墛媽崕彁憭摂晼棙櫄洔潪煚、¥ウЖ┆<D096><E29486><EFBFBD>辈炒刀犯购患骄坷谅媚牌侨墒颂臀闲岩釉罩棕仝圮蒉哙徕沅彐玷殛腱眍镳耱篝貊鼬<E8B28A><E9BCAC><EFBFBD><EFBFBD>"

View File

@@ -0,0 +1,13 @@
{
"Image": {
"Width": 800,
"Height": 600,
"Title": "View from 15th Floor",
"Thumbnail": {
"Url": "http://www.example.com/image/481989943",
"Height": 125,
"Width": "100"
},
"IDs": [116, 943, 234, 38793]
}
}

View File

@@ -0,0 +1,22 @@
[
{
"precision": "zip",
"Latitude": 37.7668,
"Longitude": -122.3959,
"Address": "",
"City": "SAN FRANCISCO",
"State": "CA",
"Zip": "94107",
"Country": "US"
},
{
"precision": "zip",
"Latitude": 37.371991,
"Longitude": -122.026020,
"Address": "",
"City": "SUNNYVALE",
"State": "CA",
"Zip": "94085",
"Country": "US"
}
]

425
app/cjson/tests/test.lua Normal file
View File

@@ -0,0 +1,425 @@
#!/usr/bin/env lua
-- Lua CJSON tests
--
-- Mark Pulford <mark@kyne.com.au>
--
-- Note: The output of this script is easier to read with "less -S"
local json = require "cjson"
local json_safe = require "cjson.safe"
local util = require "cjson.util"
local function gen_raw_octets()
local chars = {}
for i = 0, 255 do chars[i + 1] = string.char(i) end
return table.concat(chars)
end
-- Generate every UTF-16 codepoint, including supplementary codes
local function gen_utf16_escaped()
-- Create raw table escapes
local utf16_escaped = {}
local count = 0
local function append_escape(code)
local esc = ('\\u%04X'):format(code)
table.insert(utf16_escaped, esc)
end
table.insert(utf16_escaped, '"')
for i = 0, 0xD7FF do
append_escape(i)
end
-- Skip 0xD800 - 0xDFFF since they are used to encode supplementary
-- codepoints
for i = 0xE000, 0xFFFF do
append_escape(i)
end
-- Append surrogate pair for each supplementary codepoint
for high = 0xD800, 0xDBFF do
for low = 0xDC00, 0xDFFF do
append_escape(high)
append_escape(low)
end
end
table.insert(utf16_escaped, '"')
return table.concat(utf16_escaped)
end
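-- Note: a surrogate pair such as \uD800\uDC00 decodes to U+10000, the first
-- supplementary codepoint, so the nested loops above emit every codepoint
-- from U+10000 through U+10FFFF exactly once.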
function load_testdata()
local data = {}
-- Data for 8bit raw <-> escaped octets tests
data.octets_raw = gen_raw_octets()
data.octets_escaped = util.file_load("octets-escaped.dat")
-- Data for \uXXXX -> UTF-8 test
data.utf16_escaped = gen_utf16_escaped()
-- Load matching data for utf16_escaped
local utf8_loaded
utf8_loaded, data.utf8_raw = pcall(util.file_load, "utf8.dat")
if not utf8_loaded then
data.utf8_raw = "Failed to load utf8.dat - please run genutf8.pl"
end
data.table_cycle = {}
data.table_cycle[1] = data.table_cycle
local big = {}
for i = 1, 1100 do
big = { { 10, false, true, json.null }, "string", a = big }
end
data.deeply_nested_data = big
return data
end
function test_decode_cycle(filename)
local obj1 = json.decode(util.file_load(filename))
local obj2 = json.decode(json.encode(obj1))
return util.compare_values(obj1, obj2)
end
-- Set up data used in tests
local Inf = math.huge;
local NaN = math.huge * 0;
local testdata = load_testdata()
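-- Each test entry below appears to follow the shape expected by
-- util.run_test_group():
--   { description, function, { arguments... }, expect_success, { expected results... } }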
local cjson_tests = {
-- Test API variables
{ "Check module name, version",
function () return json._NAME, json._VERSION end, { },
true, { "cjson", "2.1devel" } },
-- Test decoding simple types
{ "Decode string",
json.decode, { '"test string"' }, true, { "test string" } },
{ "Decode numbers",
json.decode, { '[ 0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10 ]' },
true, { { 0.0, -5000, -1, 0.0003, 1023.2, 0 } } },
{ "Decode null",
json.decode, { 'null' }, true, { json.null } },
{ "Decode true",
json.decode, { 'true' }, true, { true } },
{ "Decode false",
json.decode, { 'false' }, true, { false } },
{ "Decode object with numeric keys",
json.decode, { '{ "1": "one", "3": "three" }' },
true, { { ["1"] = "one", ["3"] = "three" } } },
{ "Decode object with string keys",
json.decode, { '{ "a": "a", "b": "b" }' },
true, { { a = "a", b = "b" } } },
{ "Decode array",
json.decode, { '[ "one", null, "three" ]' },
true, { { "one", json.null, "three" } } },
-- Test decoding errors
{ "Decode UTF-16BE [throw error]",
json.decode, { '\0"\0"' },
false, { "JSON parser does not support UTF-16 or UTF-32" } },
{ "Decode UTF-16LE [throw error]",
json.decode, { '"\0"\0' },
false, { "JSON parser does not support UTF-16 or UTF-32" } },
{ "Decode UTF-32BE [throw error]",
json.decode, { '\0\0\0"' },
false, { "JSON parser does not support UTF-16 or UTF-32" } },
{ "Decode UTF-32LE [throw error]",
json.decode, { '"\0\0\0' },
false, { "JSON parser does not support UTF-16 or UTF-32" } },
{ "Decode partial JSON [throw error]",
json.decode, { '{ "unexpected eof": ' },
false, { "Expected value but found T_END at character 21" } },
{ "Decode with extra comma [throw error]",
json.decode, { '{ "extra data": true }, false' },
false, { "Expected the end but found T_COMMA at character 23" } },
{ "Decode invalid escape code [throw error]",
json.decode, { [[ { "bad escape \q code" } ]] },
false, { "Expected object key string but found invalid escape code at character 16" } },
{ "Decode invalid unicode escape [throw error]",
json.decode, { [[ { "bad unicode \u0f6 escape" } ]] },
false, { "Expected object key string but found invalid unicode escape code at character 17" } },
{ "Decode invalid keyword [throw error]",
json.decode, { ' [ "bad bareword", test ] ' },
false, { "Expected value but found invalid token at character 20" } },
{ "Decode invalid number #1 [throw error]",
json.decode, { '[ -+12 ]' },
false, { "Expected value but found invalid number at character 3" } },
{ "Decode invalid number #2 [throw error]",
json.decode, { '-v' },
false, { "Expected value but found invalid number at character 1" } },
{ "Decode invalid number exponent [throw error]",
json.decode, { '[ 0.4eg10 ]' },
false, { "Expected comma or array end but found invalid token at character 6" } },
-- Test decoding nested arrays / objects
{ "Set decode_max_depth(5)",
json.decode_max_depth, { 5 }, true, { 5 } },
{ "Decode array at nested limit",
json.decode, { '[[[[[ "nested" ]]]]]' },
true, { {{{{{ "nested" }}}}} } },
{ "Decode array over nested limit [throw error]",
json.decode, { '[[[[[[ "nested" ]]]]]]' },
false, { "Found too many nested data structures (6) at character 6" } },
{ "Decode object at nested limit",
json.decode, { '{"a":{"b":{"c":{"d":{"e":"nested"}}}}}' },
true, { {a={b={c={d={e="nested"}}}}} } },
{ "Decode object over nested limit [throw error]",
json.decode, { '{"a":{"b":{"c":{"d":{"e":{"f":"nested"}}}}}}' },
false, { "Found too many nested data structures (6) at character 26" } },
{ "Set decode_max_depth(1000)",
json.decode_max_depth, { 1000 }, true, { 1000 } },
{ "Decode deeply nested array [throw error]",
json.decode, { string.rep("[", 1100) .. '1100' .. string.rep("]", 1100)},
false, { "Found too many nested data structures (1001) at character 1001" } },
-- Test encoding nested tables
{ "Set encode_max_depth(5)",
json.encode_max_depth, { 5 }, true, { 5 } },
{ "Encode nested table as array at nested limit",
json.encode, { {{{{{"nested"}}}}} }, true, { '[[[[["nested"]]]]]' } },
{ "Encode nested table as array after nested limit [throw error]",
json.encode, { { {{{{{"nested"}}}}} } },
false, { "Cannot serialise, excessive nesting (6)" } },
{ "Encode nested table as object at nested limit",
json.encode, { {a={b={c={d={e="nested"}}}}} },
true, { '{"a":{"b":{"c":{"d":{"e":"nested"}}}}}' } },
{ "Encode nested table as object over nested limit [throw error]",
json.encode, { {a={b={c={d={e={f="nested"}}}}}} },
false, { "Cannot serialise, excessive nesting (6)" } },
{ "Encode table with cycle [throw error]",
json.encode, { testdata.table_cycle },
false, { "Cannot serialise, excessive nesting (6)" } },
{ "Set encode_max_depth(1000)",
json.encode_max_depth, { 1000 }, true, { 1000 } },
{ "Encode deeply nested data [throw error]",
json.encode, { testdata.deeply_nested_data },
false, { "Cannot serialise, excessive nesting (1001)" } },
-- Test encoding simple types
{ "Encode null",
json.encode, { json.null }, true, { 'null' } },
{ "Encode true",
json.encode, { true }, true, { 'true' } },
{ "Encode false",
json.encode, { false }, true, { 'false' } },
{ "Encode empty object",
json.encode, { { } }, true, { '{}' } },
{ "Encode integer",
json.encode, { 10 }, true, { '10' } },
{ "Encode string",
json.encode, { "hello" }, true, { '"hello"' } },
{ "Encode Lua function [throw error]",
json.encode, { function () end },
false, { "Cannot serialise function: type not supported" } },
-- Test decoding invalid numbers
{ "Set decode_invalid_numbers(true)",
json.decode_invalid_numbers, { true }, true, { true } },
{ "Decode hexadecimal",
json.decode, { '0x6.ffp1' }, true, { 13.9921875 } },
{ "Decode numbers with leading zero",
json.decode, { '[ 0123, 00.33 ]' }, true, { { 123, 0.33 } } },
{ "Decode +-Inf",
json.decode, { '[ +Inf, Inf, -Inf ]' }, true, { { Inf, Inf, -Inf } } },
{ "Decode +-Infinity",
json.decode, { '[ +Infinity, Infinity, -Infinity ]' },
true, { { Inf, Inf, -Inf } } },
{ "Decode +-NaN",
json.decode, { '[ +NaN, NaN, -NaN ]' }, true, { { NaN, NaN, NaN } } },
{ "Decode Infrared (not infinity) [throw error]",
json.decode, { 'Infrared' },
false, { "Expected the end but found invalid token at character 4" } },
{ "Decode Noodle (not NaN) [throw error]",
json.decode, { 'Noodle' },
false, { "Expected value but found invalid token at character 1" } },
{ "Set decode_invalid_numbers(false)",
json.decode_invalid_numbers, { false }, true, { false } },
{ "Decode hexadecimal [throw error]",
json.decode, { '0x6' },
false, { "Expected value but found invalid number at character 1" } },
{ "Decode numbers with leading zero [throw error]",
json.decode, { '[ 0123, 00.33 ]' },
false, { "Expected value but found invalid number at character 3" } },
{ "Decode +-Inf [throw error]",
json.decode, { '[ +Inf, Inf, -Inf ]' },
false, { "Expected value but found invalid token at character 3" } },
{ "Decode +-Infinity [throw error]",
json.decode, { '[ +Infinity, Infinity, -Infinity ]' },
false, { "Expected value but found invalid token at character 3" } },
{ "Decode +-NaN [throw error]",
json.decode, { '[ +NaN, NaN, -NaN ]' },
false, { "Expected value but found invalid token at character 3" } },
{ 'Set decode_invalid_numbers("on")',
json.decode_invalid_numbers, { "on" }, true, { true } },
-- Test encoding invalid numbers
{ "Set encode_invalid_numbers(false)",
json.encode_invalid_numbers, { false }, true, { false } },
{ "Encode NaN [throw error]",
json.encode, { NaN },
false, { "Cannot serialise number: must not be NaN or Infinity" } },
{ "Encode Infinity [throw error]",
json.encode, { Inf },
false, { "Cannot serialise number: must not be NaN or Infinity" } },
{ "Set encode_invalid_numbers(\"null\")",
json.encode_invalid_numbers, { "null" }, true, { "null" } },
{ "Encode NaN as null",
json.encode, { NaN }, true, { "null" } },
{ "Encode Infinity as null",
json.encode, { Inf }, true, { "null" } },
{ "Set encode_invalid_numbers(true)",
json.encode_invalid_numbers, { true }, true, { true } },
{ "Encode NaN",
json.encode, { NaN }, true, { "NaN" } },
{ "Encode +Infinity",
json.encode, { Inf }, true, { "Infinity" } },
{ "Encode -Infinity",
json.encode, { -Inf }, true, { "-Infinity" } },
{ 'Set encode_invalid_numbers("off")',
json.encode_invalid_numbers, { "off" }, true, { false } },
-- Test encoding tables
{ "Set encode_sparse_array(true, 2, 3)",
json.encode_sparse_array, { true, 2, 3 }, true, { true, 2, 3 } },
{ "Encode sparse table as array #1",
json.encode, { { [3] = "sparse test" } },
true, { '[null,null,"sparse test"]' } },
{ "Encode sparse table as array #2",
json.encode, { { [1] = "one", [4] = "sparse test" } },
true, { '["one",null,null,"sparse test"]' } },
{ "Encode sparse array as object",
json.encode, { { [1] = "one", [5] = "sparse test" } },
true, { '{"1":"one","5":"sparse test"}' } },
{ "Encode table with numeric string key as object",
json.encode, { { ["2"] = "numeric string key test" } },
true, { '{"2":"numeric string key test"}' } },
{ "Set encode_sparse_array(false)",
json.encode_sparse_array, { false }, true, { false, 2, 3 } },
{ "Encode table with incompatible key [throw error]",
json.encode, { { [false] = "wrong" } },
false, { "Cannot serialise boolean: table key must be a number or string" } },
-- Test escaping
{ "Encode all octets (8-bit clean)",
json.encode, { testdata.octets_raw }, true, { testdata.octets_escaped } },
{ "Decode all escaped octets",
json.decode, { testdata.octets_escaped }, true, { testdata.octets_raw } },
{ "Decode single UTF-16 escape",
json.decode, { [["\uF800"]] }, true, { "\239\160\128" } },
{ "Decode all UTF-16 escapes (including surrogate combinations)",
json.decode, { testdata.utf16_escaped }, true, { testdata.utf8_raw } },
{ "Decode swapped surrogate pair [throw error]",
json.decode, { [["\uDC00\uD800"]] },
false, { "Expected value but found invalid unicode escape code at character 2" } },
{ "Decode duplicate high surrogate [throw error]",
json.decode, { [["\uDB00\uDB00"]] },
false, { "Expected value but found invalid unicode escape code at character 2" } },
{ "Decode duplicate low surrogate [throw error]",
json.decode, { [["\uDC00\uDC00"]] },
false, { "Expected value but found invalid unicode escape code at character 2" } },
{ "Decode missing low surrogate [throw error]",
json.decode, { [["\uDB00"]] },
false, { "Expected value but found invalid unicode escape code at character 2" } },
{ "Decode invalid low surrogate [throw error]",
json.decode, { [["\uDB00\uD"]] },
false, { "Expected value but found invalid unicode escape code at character 2" } },
-- Test locale support
--
-- The standard Lua interpreter is ANSI C only and doesn't support locales
-- by default. Force a known problematic locale to test strtod()/sprintf().
{ "Set locale to cs_CZ (comma separator)", function ()
os.setlocale("cs_CZ")
json.new()
end },
{ "Encode number under comma locale",
json.encode, { 1.5 }, true, { '1.5' } },
{ "Decode number in array under comma locale",
json.decode, { '[ 10, "test" ]' }, true, { { 10, "test" } } },
{ "Revert locale to POSIX", function ()
os.setlocale("C")
json.new()
end },
-- Test encode_keep_buffer() and enable_number_precision()
{ "Set encode_keep_buffer(false)",
json.encode_keep_buffer, { false }, true, { false } },
{ "Set encode_number_precision(3)",
json.encode_number_precision, { 3 }, true, { 3 } },
{ "Encode number with precision 3",
json.encode, { 1/3 }, true, { "0.333" } },
{ "Set encode_number_precision(14)",
json.encode_number_precision, { 14 }, true, { 14 } },
{ "Set encode_keep_buffer(true)",
json.encode_keep_buffer, { true }, true, { true } },
-- Test config API errors
-- Function is listed as '?' due to pcall
{ "Set encode_number_precision(0) [throw error]",
json.encode_number_precision, { 0 },
false, { "bad argument #1 to '?' (expected integer between 1 and 14)" } },
{ "Set encode_number_precision(\"five\") [throw error]",
json.encode_number_precision, { "five" },
false, { "bad argument #1 to '?' (number expected, got string)" } },
{ "Set encode_keep_buffer(nil, true) [throw error]",
json.encode_keep_buffer, { nil, true },
false, { "bad argument #2 to '?' (found too many arguments)" } },
{ "Set encode_max_depth(\"wrong\") [throw error]",
json.encode_max_depth, { "wrong" },
false, { "bad argument #1 to '?' (number expected, got string)" } },
{ "Set decode_max_depth(0) [throw error]",
json.decode_max_depth, { "0" },
false, { "bad argument #1 to '?' (expected integer between 1 and 2147483647)" } },
{ "Set encode_invalid_numbers(-2) [throw error]",
json.encode_invalid_numbers, { -2 },
false, { "bad argument #1 to '?' (invalid option '-2')" } },
{ "Set decode_invalid_numbers(true, false) [throw error]",
json.decode_invalid_numbers, { true, false },
false, { "bad argument #2 to '?' (found too many arguments)" } },
{ "Set encode_sparse_array(\"not quite on\") [throw error]",
json.encode_sparse_array, { "not quite on" },
false, { "bad argument #1 to '?' (invalid option 'not quite on')" } },
{ "Reset Lua CJSON configuration", function () json = json.new() end },
-- Wrap in a function to ensure the table returned by json.new() is used
{ "Check encode_sparse_array()",
function (...) return json.encode_sparse_array(...) end, { },
true, { false, 2, 10 } },
{ "Encode (safe) simple value",
json_safe.encode, { true },
true, { "true" } },
{ "Encode (safe) argument validation [throw error]",
json_safe.encode, { "arg1", "arg2" },
false, { "bad argument #1 to '?' (expected 1 argument)" } },
{ "Decode (safe) error generation",
json_safe.decode, { "Oops" },
true, { nil, "Expected value but found invalid token at character 1" } },
{ "Decode (safe) error generation after new()",
function(...) return json_safe.new().decode(...) end, { "Oops" },
true, { nil, "Expected value but found invalid token at character 1" } },
}
print(("==> Testing Lua CJSON version %s\n"):format(json._VERSION))
util.run_test_group(cjson_tests)
for _, filename in ipairs(arg) do
util.run_test("Decode cycle " .. filename, test_decode_cycle, { filename },
true, { true })
end
local pass, total = util.run_test_summary()
if pass == total then
print("==> Summary: all tests succeeded")
else
print(("==> Summary: %d/%d tests failed"):format(total - pass, total))
os.exit(1)
end
-- vi:ai et sw=4 ts=4:

View File

@@ -0,0 +1 @@
{ "array": [ 10, true, null ] }

View File

@@ -4,7 +4,7 @@
 #define LUA_USE_BUILTIN_STRING // for string.xxx()
 #define LUA_USE_BUILTIN_TABLE // for table.xxx()
 #define LUA_USE_BUILTIN_COROUTINE // for coroutine.xxx()
-#define LUA_USE_BUILTIN_MATH // for math.xxx(), partially work
+// #define LUA_USE_BUILTIN_MATH // for math.xxx(), partially work
 // #define LUA_USE_BUILTIN_IO // for io.xxx(), partially work
 // #define LUA_USE_BUILTIN_OS // for os.xxx(), not work
@@ -30,6 +30,7 @@
 #define LUA_USE_MODULES_COAP
 #define LUA_USE_MODULES_U8G
 #define LUA_USE_MODULES_WS2812
+#define LUA_USE_MODULES_CJSON
 #endif /* LUA_USE_MODULES */
 #endif /* __USER_MODULES_H__ */

View File

@@ -7,6 +7,6 @@
 #define NODE_VERSION_INTERNAL 0U
 #define NODE_VERSION "NodeMCU 0.9.5"
-#define BUILD_DATE "build 20150315"
+#define BUILD_DATE "build 20150317"
 #endif /* __USER_VERSION_H__ */

View File

@@ -25,7 +25,7 @@
 #define c_strncmp os_strncmp
 #define c_strncpy os_strncpy
 // #define c_strstr os_strstr
-#define c_strncasecmp c_strcmp
+#define c_strncasecmp strncasecmp
 #define c_strstr strstr
 #define c_strncat strncat

View File

@@ -47,6 +47,7 @@ INCLUDES += -I ../platform
 INCLUDES += -I ../wofs
 INCLUDES += -I ../spiffs
 INCLUDES += -I ../smart
+INCLUDES += -I ../cjson
 PDIR := ../$(PDIR)
 sinclude $(PDIR)Makefile

View File

@@ -79,6 +79,9 @@ LUALIB_API int ( luaopen_file )( lua_State *L );
 #define AUXLIB_OW "ow"
 LUALIB_API int ( luaopen_ow )( lua_State *L );
+#define AUXLIB_CJSON "cjson"
+LUALIB_API int ( luaopen_cjson )( lua_State *L );
 // Helper macros
 #define MOD_CHECK_ID( mod, id )\
 if( !platform_ ## mod ## _exists( id ) )\

1650
app/modules/cjson.c Normal file

File diff suppressed because it is too large

View File

@@ -338,7 +338,7 @@ LUALIB_API int luaopen_file( lua_State *L )
 #if LUA_OPTIMIZE_MEMORY > 0
 return 0;
 #else // #if LUA_OPTIMIZE_MEMORY > 0
-luaL_register( L, AUXLIB_NODE, file_map );
+luaL_register( L, AUXLIB_FILE, file_map );
 // Add constants
 return 1;

View File

@@ -141,6 +141,13 @@
 #define ROM_MODULES_WS2812
 #endif
+#if defined(LUA_USE_MODULES_CJSON)
+#define MODULES_CJSON "cjson"
+#define ROM_MODULES_CJSON \
+_ROM(MODULES_CJSON, luaopen_cjson, cjson_map)
+#else
+#define ROM_MODULES_CJSON
+#endif
 #define LUA_MODULES_ROM \
 ROM_MODULES_GPIO \
@@ -159,7 +166,8 @@
 ROM_MODULES_UART \
 ROM_MODULES_OW \
 ROM_MODULES_BIT \
-ROM_MODULES_WS2812
+ROM_MODULES_WS2812 \
+ROM_MODULES_CJSON
 #endif
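
With ROM_MODULES_CJSON added to the ROM module table above, the module is exposed on the firmware as a built-in "cjson" table (no require() needed). A minimal usage sketch on the NodeMCU Lua prompt, with illustrative output shown in the comments:

    print(cjson.encode({ name = "nodemcu", ok = true }))
    -- e.g. {"ok":true,"name":"nodemcu"}  (object key order is not guaranteed)
    local t = cjson.decode('{"pin":4,"level":1}')
    print(t.pin, t.level)
    -- 4       1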