Fix ARM compile errors on g++ 7.4 (#354)

* Fix ARM compilation errors

* Update singleheader
This commit is contained in:
John Keiser 2019-11-04 07:36:34 -08:00 committed by Daniel Lemire
parent b1224a77db
commit c97eb41dc6
5 changed files with 873 additions and 881 deletions

View File

@ -1,4 +1,4 @@
/* auto-generated on Sun Oct 13 11:11:50 DST 2019. Do not edit! */
/* auto-generated on Sun Nov 3 14:09:55 STD 2019. Do not edit! */
#include <iostream>
#include "simdjson.h"

View File

@ -1,4 +1,4 @@
/* auto-generated on Sun Oct 13 11:11:50 DST 2019. Do not edit! */
/* auto-generated on Sun Nov 3 14:09:55 STD 2019. Do not edit! */
#include "simdjson.h"
/* used for http://dmalloc.com/ Dmalloc - Debug Malloc Library */
@ -312,6 +312,7 @@ inline size_t codepoint_to_utf8(uint32_t cp, uint8_t *c) {
#define SIMDJSON_NUMBERPARSING_H
#include <cmath>
#include <limits>
#ifdef JSON_TEST_NUMBERS // for unit testing
void found_invalid_number(const uint8_t *buf);
@ -321,7 +322,7 @@ void found_float(double result, const uint8_t *buf);
#endif
namespace simdjson {
// Allowable floating-point values range from
// Allowable floating-point values range
// std::numeric_limits<double>::lowest() to std::numeric_limits<double>::max(),
// so from -1.7976e308 all the way to 1.7975e308 in binary64. The lowest
// non-zero normal values is std::numeric_limits<double>::min() or
@ -621,6 +622,13 @@ static never_inline bool parse_float(const uint8_t *const buf, ParsedJson &pj,
if (is_not_structural_or_whitespace(*p)) {
return false;
}
// check that we can go from long double to double safely.
if(i > std::numeric_limits<double>::max()) {
#ifdef JSON_TEST_NUMBERS // for unit testing
found_invalid_number(buf + offset);
#endif
return false;
}
double d = negative ? -i : i;
pj.write_tape_double(d);
#ifdef JSON_TEST_NUMBERS // for unit testing
@ -36037,6 +36045,7 @@ char *allocate_padded_buffer(size_t length) {
// However, we might as well align to cache lines...
size_t totalpaddedlength = length + SIMDJSON_PADDING;
char *padded_buffer = aligned_malloc_char(64, totalpaddedlength);
memset(padded_buffer + length, 0, totalpaddedlength - length);
return padded_buffer;
}
@ -36421,7 +36430,7 @@ int json_parse_dispatch(const uint8_t *buf, size_t len, ParsedJson &pj,
return json_parse_ptr.load(std::memory_order_relaxed)(buf, len, pj, realloc);
}
std::atomic<json_parse_functype *> json_parse_ptr = &json_parse_dispatch;
std::atomic<json_parse_functype *> json_parse_ptr{&json_parse_dispatch};
WARN_UNUSED
ParsedJson build_parsed_json(const uint8_t *buf, size_t len,
@ -37434,77 +37443,82 @@ really_inline void find_whitespace_and_operators(
}).to_bitmask();
}
// This file contains a non-architecture-specific version of "flatten" used in stage1.
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is include already includes
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
#ifdef SIMDJSON_NAIVE_FLATTEN // useful for benchmarking
static const size_t STEP_SIZE = 128;
// This is just a naive implementation. It should normally be
// disabled, but can be used for research purposes to compare
// against our optimized version.
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
uint32_t *out_ptr = base_ptr + base;
idx -= 64;
while (bits != 0) {
out_ptr[0] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
out_ptr++;
}
base = (out_ptr - base_ptr);
}
class bit_indexer {
public:
uint32_t *tail;
#else // SIMDJSON_NAIVE_FLATTEN
bit_indexer(uint32_t *index_buf) : tail(index_buf) {}
// flatten out values in 'bits' assuming that they are to have values of idx
// plus their position in the bitvector, and store these indexes at
// base_ptr[base] incrementing base as we go
// will potentially store extra values beyond end of valid bits, so base_ptr
// needs to be large enough to handle this
really_inline void flatten_bits(uint32_t *&base_ptr, uint32_t idx, uint64_t bits) {
really_inline void write_indexes(uint32_t idx, uint64_t bits) {
// In some instances, the next branch is expensive because it is mispredicted.
// Unfortunately, in other cases,
// it helps tremendously.
if (bits == 0)
return;
uint32_t cnt = hamming(bits);
idx -= 64;
// Do the first 8 all together
for (int i=0; i<8; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Do the next 8 all together (we hope in most cases it won't happen at all
// and the branch is easily predicted).
if (unlikely(cnt > 8)) {
for (int i=8; i<16; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Most files don't have 16+ structurals per block, so we take several basically guaranteed
// branch mispredictions here. 16+ structurals per block means either punctuation ({} [] , :)
// or the start of a value ("abc" true 123) every 4 characters.
// or the start of a value ("abc" true 123) every four characters.
if (unlikely(cnt > 16)) {
uint32_t i = 16;
do {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
i++;
} while (i < cnt);
}
}
base_ptr += cnt;
this->tail += cnt;
}
#endif // SIMDJSON_NAIVE_FLATTEN
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
};
class json_structural_scanner {
public:
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t prev_structurals = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
bit_indexer structural_indexes;
json_structural_scanner(uint32_t *_structural_indexes) : structural_indexes{_structural_indexes} {}
// return a bitvector indicating where we have characters that end an odd-length
// sequence of backslashes (and thus change the behavior of the next character
@ -37572,9 +37586,7 @@ really_inline uint64_t follows(const uint64_t match, const uint64_t filler, uint
return result;
}
really_inline ErrorValues detect_errors_on_eof(
uint64_t &unescaped_chars_error,
const uint64_t prev_in_string) {
really_inline ErrorValues detect_errors_on_eof() {
if (prev_in_string) {
return UNCLOSED_STRING;
}
@ -37592,7 +37604,7 @@ really_inline ErrorValues detect_errors_on_eof(
//
// Backslash sequences outside of quotes will be detected in stage 2.
//
really_inline uint64_t find_strings(const simd_input in, uint64_t &prev_escaped, uint64_t &prev_in_string) {
really_inline uint64_t find_strings(const simd_input in) {
const uint64_t backslash = in.eq('\\');
const uint64_t escaped = follows_odd_sequence_of(backslash, prev_escaped);
const uint64_t quote = in.eq('"') & ~escaped;
@ -37631,7 +37643,7 @@ really_inline uint64_t invalid_string_bytes(const uint64_t unescaped, const uint
// contents of a string the same as content outside. Errors and structurals inside the string or on
// the trailing quote will need to be removed later when the correct string information is known.
//
really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t &prev_primitive) {
really_inline uint64_t find_potential_structurals(const simd_input in) {
// These use SIMD so let's kick them off before running the regular 64-bit stuff ...
uint64_t whitespace, op;
find_whitespace_and_operators(in, whitespace, op);
@ -37646,12 +37658,8 @@ really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t
return op | start_primitive;
}
static const size_t STEP_SIZE = 128;
//
// Find the important bits of JSON in a 128-byte chunk, and add them to :
//
//
// Find the important bits of JSON in a 128-byte chunk, and add them to structural_indexes.
//
// PERF NOTES:
// We pipe 2 inputs through these stages:
@ -37669,13 +37677,7 @@ static const size_t STEP_SIZE = 128;
// available capacity with just one input. Running 2 at a time seems to give the CPU a good enough
// workout.
//
really_inline void find_structural_bits_128(
const uint8_t *buf, const size_t idx, uint32_t *&base_ptr,
uint64_t &prev_escaped, uint64_t &prev_in_string,
uint64_t &prev_primitive,
uint64_t &prev_structurals,
uint64_t &unescaped_chars_error,
utf8_checker &utf8_state) {
really_inline void scan_step(const uint8_t *buf, const size_t idx, utf8_checker &utf8_checker) {
//
// Load up all 128 bytes into SIMD registers
//
@ -37688,10 +37690,10 @@ really_inline void find_structural_bits_128(
// This will include false structurals that are *inside* strings--we'll filter strings out
// before we return.
//
uint64_t string_1 = find_strings(in_1, prev_escaped, prev_in_string);
uint64_t structurals_1 = find_potential_structurals(in_1, prev_primitive);
uint64_t string_2 = find_strings(in_2, prev_escaped, prev_in_string);
uint64_t structurals_2 = find_potential_structurals(in_2, prev_primitive);
uint64_t string_1 = this->find_strings(in_1);
uint64_t structurals_1 = this->find_potential_structurals(in_1);
uint64_t string_2 = this->find_strings(in_2);
uint64_t structurals_2 = this->find_potential_structurals(in_2);
//
// Do miscellaneous work while the processor is busy calculating strings and structurals.
@ -37699,49 +37701,24 @@ really_inline void find_structural_bits_128(
// After that, weed out structurals that are inside strings and find invalid string characters.
//
uint64_t unescaped_1 = in_1.lteq(0x1F);
utf8_state.check_next_input(in_1);
flatten_bits(base_ptr, idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_1 & ~string_1;
unescaped_chars_error |= unescaped_1 & string_1;
utf8_checker.check_next_input(in_1);
this->structural_indexes.write_indexes(idx-64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_1 & ~string_1;
this->unescaped_chars_error |= unescaped_1 & string_1;
uint64_t unescaped_2 = in_2.lteq(0x1F);
utf8_state.check_next_input(in_2);
flatten_bits(base_ptr, idx+64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_2 & ~string_2;
unescaped_chars_error |= unescaped_2 & string_2;
utf8_checker.check_next_input(in_2);
this->structural_indexes.write_indexes(idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_2 & ~string_2;
this->unescaped_chars_error |= unescaped_2 & string_2;
}
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
uint32_t *base_ptr = pj.structural_indexes;
utf8_checker utf8_state;
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t structurals = 0;
really_inline void scan(const uint8_t *buf, const size_t len, utf8_checker &utf8_checker) {
size_t lenminusstep = len < STEP_SIZE ? 0 : len - STEP_SIZE;
size_t idx = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
for (; idx < lenminusstep; idx += STEP_SIZE) {
find_structural_bits_128(&buf[idx], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&buf[idx], idx, utf8_checker);
}
/* If we have a final chunk of less than 64 bytes, pad it to 64 with
@ -37751,21 +37728,33 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
uint8_t tmp_buf[STEP_SIZE];
memset(tmp_buf, 0x20, STEP_SIZE);
memcpy(tmp_buf, buf + idx, len - idx);
find_structural_bits_128(&tmp_buf[0], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&tmp_buf[0], idx, utf8_checker);
idx += STEP_SIZE;
}
/* finally, flatten out the remaining structurals from the last iteration */
flatten_bits(base_ptr, idx, structurals);
this->structural_indexes.write_indexes(idx-64, this->prev_structurals);
}
simdjson::ErrorValues error = detect_errors_on_eof(unescaped_chars_error, prev_in_string);
};
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
utf8_checker utf8_checker{};
json_structural_scanner scanner{pj.structural_indexes};
scanner.scan(buf, len, utf8_checker);
simdjson::ErrorValues error = scanner.detect_errors_on_eof();
if (unlikely(error != simdjson::SUCCESS)) {
return error;
}
pj.n_structural_indexes = base_ptr - pj.structural_indexes;
pj.n_structural_indexes = scanner.structural_indexes.tail - pj.structural_indexes;
/* a valid JSON file cannot have zero structural indexes - we should have
* found something */
if (unlikely(pj.n_structural_indexes == 0u)) {
@ -37781,7 +37770,7 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
}
/* make it safe to dereference one beyond this array */
pj.structural_indexes[pj.n_structural_indexes] = 0;
return utf8_state.errors();
return utf8_checker.errors();
}
} // namespace simdjson::arm64
@ -37881,32 +37870,44 @@ really_inline void find_whitespace_and_operators(
#endif // else SIMDJSON_NAIVE_STRUCTURAL
}
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
static const size_t STEP_SIZE = 128;
class bit_indexer {
public:
uint32_t *tail;
bit_indexer(uint32_t *index_buf) : tail(index_buf) {}
// flatten out values in 'bits' assuming that they are to have values of idx
// plus their position in the bitvector, and store these indexes at
// base_ptr[base] incrementing base as we go
// will potentially store extra values beyond end of valid bits, so base_ptr
// needs to be large enough to handle this
really_inline void flatten_bits(uint32_t *&base_ptr, uint32_t idx, uint64_t bits) {
really_inline void write_indexes(uint32_t idx, uint64_t bits) {
// In some instances, the next branch is expensive because it is mispredicted.
// Unfortunately, in other cases,
// it helps tremendously.
if (bits == 0)
return;
uint32_t cnt = _mm_popcnt_u64(bits);
idx -= 64;
uint32_t cnt = hamming(bits);
// Do the first 8 all together
for (int i=0; i<8; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = _blsr_u64(bits);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Do the next 8 all together (we hope in most cases it won't happen at all
// and the branch is easily predicted).
if (unlikely(cnt > 8)) {
for (int i=8; i<16; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = _blsr_u64(bits);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Most files don't have 16+ structurals per block, so we take several basically guaranteed
@ -37915,20 +37916,36 @@ really_inline void flatten_bits(uint32_t *&base_ptr, uint32_t idx, uint64_t bits
if (unlikely(cnt > 16)) {
uint32_t i = 16;
do {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = _blsr_u64(bits);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
i++;
} while (i < cnt);
}
}
base_ptr += cnt;
this->tail += cnt;
}
};
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
class json_structural_scanner {
public:
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t prev_structurals = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
bit_indexer structural_indexes;
json_structural_scanner(uint32_t *_structural_indexes) : structural_indexes{_structural_indexes} {}
// return a bitvector indicating where we have characters that end an odd-length
// sequence of backslashes (and thus change the behavior of the next character
@ -37996,9 +38013,7 @@ really_inline uint64_t follows(const uint64_t match, const uint64_t filler, uint
return result;
}
really_inline ErrorValues detect_errors_on_eof(
uint64_t &unescaped_chars_error,
const uint64_t prev_in_string) {
really_inline ErrorValues detect_errors_on_eof() {
if (prev_in_string) {
return UNCLOSED_STRING;
}
@ -38016,7 +38031,7 @@ really_inline ErrorValues detect_errors_on_eof(
//
// Backslash sequences outside of quotes will be detected in stage 2.
//
really_inline uint64_t find_strings(const simd_input in, uint64_t &prev_escaped, uint64_t &prev_in_string) {
really_inline uint64_t find_strings(const simd_input in) {
const uint64_t backslash = in.eq('\\');
const uint64_t escaped = follows_odd_sequence_of(backslash, prev_escaped);
const uint64_t quote = in.eq('"') & ~escaped;
@ -38055,7 +38070,7 @@ really_inline uint64_t invalid_string_bytes(const uint64_t unescaped, const uint
// contents of a string the same as content outside. Errors and structurals inside the string or on
// the trailing quote will need to be removed later when the correct string information is known.
//
really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t &prev_primitive) {
really_inline uint64_t find_potential_structurals(const simd_input in) {
// These use SIMD so let's kick them off before running the regular 64-bit stuff ...
uint64_t whitespace, op;
find_whitespace_and_operators(in, whitespace, op);
@ -38070,12 +38085,8 @@ really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t
return op | start_primitive;
}
static const size_t STEP_SIZE = 128;
//
// Find the important bits of JSON in a 128-byte chunk, and add them to :
//
//
// Find the important bits of JSON in a 128-byte chunk, and add them to structural_indexes.
//
// PERF NOTES:
// We pipe 2 inputs through these stages:
@ -38093,13 +38104,7 @@ static const size_t STEP_SIZE = 128;
// available capacity with just one input. Running 2 at a time seems to give the CPU a good enough
// workout.
//
really_inline void find_structural_bits_128(
const uint8_t *buf, const size_t idx, uint32_t *&base_ptr,
uint64_t &prev_escaped, uint64_t &prev_in_string,
uint64_t &prev_primitive,
uint64_t &prev_structurals,
uint64_t &unescaped_chars_error,
utf8_checker &utf8_state) {
really_inline void scan_step(const uint8_t *buf, const size_t idx, utf8_checker &utf8_checker) {
//
// Load up all 128 bytes into SIMD registers
//
@ -38112,10 +38117,10 @@ really_inline void find_structural_bits_128(
// This will include false structurals that are *inside* strings--we'll filter strings out
// before we return.
//
uint64_t string_1 = find_strings(in_1, prev_escaped, prev_in_string);
uint64_t structurals_1 = find_potential_structurals(in_1, prev_primitive);
uint64_t string_2 = find_strings(in_2, prev_escaped, prev_in_string);
uint64_t structurals_2 = find_potential_structurals(in_2, prev_primitive);
uint64_t string_1 = this->find_strings(in_1);
uint64_t structurals_1 = this->find_potential_structurals(in_1);
uint64_t string_2 = this->find_strings(in_2);
uint64_t structurals_2 = this->find_potential_structurals(in_2);
//
// Do miscellaneous work while the processor is busy calculating strings and structurals.
@ -38123,49 +38128,24 @@ really_inline void find_structural_bits_128(
// After that, weed out structurals that are inside strings and find invalid string characters.
//
uint64_t unescaped_1 = in_1.lteq(0x1F);
utf8_state.check_next_input(in_1);
flatten_bits(base_ptr, idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_1 & ~string_1;
unescaped_chars_error |= unescaped_1 & string_1;
utf8_checker.check_next_input(in_1);
this->structural_indexes.write_indexes(idx-64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_1 & ~string_1;
this->unescaped_chars_error |= unescaped_1 & string_1;
uint64_t unescaped_2 = in_2.lteq(0x1F);
utf8_state.check_next_input(in_2);
flatten_bits(base_ptr, idx+64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_2 & ~string_2;
unescaped_chars_error |= unescaped_2 & string_2;
utf8_checker.check_next_input(in_2);
this->structural_indexes.write_indexes(idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_2 & ~string_2;
this->unescaped_chars_error |= unescaped_2 & string_2;
}
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
uint32_t *base_ptr = pj.structural_indexes;
utf8_checker utf8_state;
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t structurals = 0;
really_inline void scan(const uint8_t *buf, const size_t len, utf8_checker &utf8_checker) {
size_t lenminusstep = len < STEP_SIZE ? 0 : len - STEP_SIZE;
size_t idx = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
for (; idx < lenminusstep; idx += STEP_SIZE) {
find_structural_bits_128(&buf[idx], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&buf[idx], idx, utf8_checker);
}
/* If we have a final chunk of less than 64 bytes, pad it to 64 with
@ -38175,21 +38155,33 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
uint8_t tmp_buf[STEP_SIZE];
memset(tmp_buf, 0x20, STEP_SIZE);
memcpy(tmp_buf, buf + idx, len - idx);
find_structural_bits_128(&tmp_buf[0], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&tmp_buf[0], idx, utf8_checker);
idx += STEP_SIZE;
}
/* finally, flatten out the remaining structurals from the last iteration */
flatten_bits(base_ptr, idx, structurals);
this->structural_indexes.write_indexes(idx-64, this->prev_structurals);
}
simdjson::ErrorValues error = detect_errors_on_eof(unescaped_chars_error, prev_in_string);
};
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
utf8_checker utf8_checker{};
json_structural_scanner scanner{pj.structural_indexes};
scanner.scan(buf, len, utf8_checker);
simdjson::ErrorValues error = scanner.detect_errors_on_eof();
if (unlikely(error != simdjson::SUCCESS)) {
return error;
}
pj.n_structural_indexes = base_ptr - pj.structural_indexes;
pj.n_structural_indexes = scanner.structural_indexes.tail - pj.structural_indexes;
/* a valid JSON file cannot have zero structural indexes - we should have
* found something */
if (unlikely(pj.n_structural_indexes == 0u)) {
@ -38205,7 +38197,7 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
}
/* make it safe to dereference one beyond this array */
pj.structural_indexes[pj.n_structural_indexes] = 0;
return utf8_state.errors();
return utf8_checker.errors();
}
} // namespace haswell
@ -38264,77 +38256,82 @@ really_inline void find_whitespace_and_operators(
}).to_bitmask();
}
// This file contains a non-architecture-specific version of "flatten" used in stage1.
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is include already includes
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
#ifdef SIMDJSON_NAIVE_FLATTEN // useful for benchmarking
static const size_t STEP_SIZE = 128;
// This is just a naive implementation. It should normally be
// disabled, but can be used for research purposes to compare
// against our optimized version.
really_inline void flatten_bits(uint32_t *base_ptr, uint32_t &base, uint32_t idx, uint64_t bits) {
uint32_t *out_ptr = base_ptr + base;
idx -= 64;
while (bits != 0) {
out_ptr[0] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
out_ptr++;
}
base = (out_ptr - base_ptr);
}
class bit_indexer {
public:
uint32_t *tail;
#else // SIMDJSON_NAIVE_FLATTEN
bit_indexer(uint32_t *index_buf) : tail(index_buf) {}
// flatten out values in 'bits' assuming that they are to have values of idx
// plus their position in the bitvector, and store these indexes at
// base_ptr[base] incrementing base as we go
// will potentially store extra values beyond end of valid bits, so base_ptr
// needs to be large enough to handle this
really_inline void flatten_bits(uint32_t *&base_ptr, uint32_t idx, uint64_t bits) {
really_inline void write_indexes(uint32_t idx, uint64_t bits) {
// In some instances, the next branch is expensive because it is mispredicted.
// Unfortunately, in other cases,
// it helps tremendously.
if (bits == 0)
return;
uint32_t cnt = hamming(bits);
idx -= 64;
// Do the first 8 all together
for (int i=0; i<8; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Do the next 8 all together (we hope in most cases it won't happen at all
// and the branch is easily predicted).
if (unlikely(cnt > 8)) {
for (int i=8; i<16; i++) {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
}
// Most files don't have 16+ structurals per block, so we take several basically guaranteed
// branch mispredictions here. 16+ structurals per block means either punctuation ({} [] , :)
// or the start of a value ("abc" true 123) every 4 characters.
// or the start of a value ("abc" true 123) every four characters.
if (unlikely(cnt > 16)) {
uint32_t i = 16;
do {
base_ptr[i] = idx + trailing_zeroes(bits);
bits = bits & (bits - 1);
this->tail[i] = idx + trailing_zeroes(bits);
bits = clear_lowest_bit(bits);
i++;
} while (i < cnt);
}
}
base_ptr += cnt;
this->tail += cnt;
}
#endif // SIMDJSON_NAIVE_FLATTEN
// This file contains the common code every implementation uses in stage1
// It is intended to be included multiple times and compiled multiple times
// We assume the file in which it is included already includes
// "simdjson/stage1_find_marks.h" (this simplifies amalgamation)
};
class json_structural_scanner {
public:
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t prev_structurals = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
bit_indexer structural_indexes;
json_structural_scanner(uint32_t *_structural_indexes) : structural_indexes{_structural_indexes} {}
// return a bitvector indicating where we have characters that end an odd-length
// sequence of backslashes (and thus change the behavior of the next character
@ -38402,9 +38399,7 @@ really_inline uint64_t follows(const uint64_t match, const uint64_t filler, uint
return result;
}
really_inline ErrorValues detect_errors_on_eof(
uint64_t &unescaped_chars_error,
const uint64_t prev_in_string) {
really_inline ErrorValues detect_errors_on_eof() {
if (prev_in_string) {
return UNCLOSED_STRING;
}
@ -38422,7 +38417,7 @@ really_inline ErrorValues detect_errors_on_eof(
//
// Backslash sequences outside of quotes will be detected in stage 2.
//
really_inline uint64_t find_strings(const simd_input in, uint64_t &prev_escaped, uint64_t &prev_in_string) {
really_inline uint64_t find_strings(const simd_input in) {
const uint64_t backslash = in.eq('\\');
const uint64_t escaped = follows_odd_sequence_of(backslash, prev_escaped);
const uint64_t quote = in.eq('"') & ~escaped;
@ -38461,7 +38456,7 @@ really_inline uint64_t invalid_string_bytes(const uint64_t unescaped, const uint
// contents of a string the same as content outside. Errors and structurals inside the string or on
// the trailing quote will need to be removed later when the correct string information is known.
//
really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t &prev_primitive) {
really_inline uint64_t find_potential_structurals(const simd_input in) {
// These use SIMD so let's kick them off before running the regular 64-bit stuff ...
uint64_t whitespace, op;
find_whitespace_and_operators(in, whitespace, op);
@ -38476,12 +38471,8 @@ really_inline uint64_t find_potential_structurals(const simd_input in, uint64_t
return op | start_primitive;
}
static const size_t STEP_SIZE = 128;
//
// Find the important bits of JSON in a 128-byte chunk, and add them to :
//
//
// Find the important bits of JSON in a 128-byte chunk, and add them to structural_indexes.
//
// PERF NOTES:
// We pipe 2 inputs through these stages:
@ -38499,13 +38490,7 @@ static const size_t STEP_SIZE = 128;
// available capacity with just one input. Running 2 at a time seems to give the CPU a good enough
// workout.
//
really_inline void find_structural_bits_128(
const uint8_t *buf, const size_t idx, uint32_t *&base_ptr,
uint64_t &prev_escaped, uint64_t &prev_in_string,
uint64_t &prev_primitive,
uint64_t &prev_structurals,
uint64_t &unescaped_chars_error,
utf8_checker &utf8_state) {
really_inline void scan_step(const uint8_t *buf, const size_t idx, utf8_checker &utf8_checker) {
//
// Load up all 128 bytes into SIMD registers
//
@ -38518,10 +38503,10 @@ really_inline void find_structural_bits_128(
// This will include false structurals that are *inside* strings--we'll filter strings out
// before we return.
//
uint64_t string_1 = find_strings(in_1, prev_escaped, prev_in_string);
uint64_t structurals_1 = find_potential_structurals(in_1, prev_primitive);
uint64_t string_2 = find_strings(in_2, prev_escaped, prev_in_string);
uint64_t structurals_2 = find_potential_structurals(in_2, prev_primitive);
uint64_t string_1 = this->find_strings(in_1);
uint64_t structurals_1 = this->find_potential_structurals(in_1);
uint64_t string_2 = this->find_strings(in_2);
uint64_t structurals_2 = this->find_potential_structurals(in_2);
//
// Do miscellaneous work while the processor is busy calculating strings and structurals.
@ -38529,49 +38514,24 @@ really_inline void find_structural_bits_128(
// After that, weed out structurals that are inside strings and find invalid string characters.
//
uint64_t unescaped_1 = in_1.lteq(0x1F);
utf8_state.check_next_input(in_1);
flatten_bits(base_ptr, idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_1 & ~string_1;
unescaped_chars_error |= unescaped_1 & string_1;
utf8_checker.check_next_input(in_1);
this->structural_indexes.write_indexes(idx-64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_1 & ~string_1;
this->unescaped_chars_error |= unescaped_1 & string_1;
uint64_t unescaped_2 = in_2.lteq(0x1F);
utf8_state.check_next_input(in_2);
flatten_bits(base_ptr, idx+64, prev_structurals); // Output *last* iteration's structurals to ParsedJson
prev_structurals = structurals_2 & ~string_2;
unescaped_chars_error |= unescaped_2 & string_2;
utf8_checker.check_next_input(in_2);
this->structural_indexes.write_indexes(idx, prev_structurals); // Output *last* iteration's structurals to ParsedJson
this->prev_structurals = structurals_2 & ~string_2;
this->unescaped_chars_error |= unescaped_2 & string_2;
}
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
uint32_t *base_ptr = pj.structural_indexes;
utf8_checker utf8_state;
// Whether the first character of the next iteration is escaped.
uint64_t prev_escaped = 0ULL;
// Whether the last iteration was still inside a string (all 1's = true, all 0's = false).
uint64_t prev_in_string = 0ULL;
// Whether the last character of the previous iteration is a primitive value character
// (anything except whitespace, braces, comma or colon).
uint64_t prev_primitive = 0ULL;
// Mask of structural characters from the last iteration.
// Kept around for performance reasons, so we can call flatten_bits to soak up some unused
// CPU capacity while the next iteration is busy with an expensive clmul in compute_quote_mask.
uint64_t structurals = 0;
really_inline void scan(const uint8_t *buf, const size_t len, utf8_checker &utf8_checker) {
size_t lenminusstep = len < STEP_SIZE ? 0 : len - STEP_SIZE;
size_t idx = 0;
// Errors with unescaped characters in strings (ASCII codepoints < 0x20)
uint64_t unescaped_chars_error = 0;
for (; idx < lenminusstep; idx += STEP_SIZE) {
find_structural_bits_128(&buf[idx], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&buf[idx], idx, utf8_checker);
}
/* If we have a final chunk of less than 64 bytes, pad it to 64 with
@ -38581,21 +38541,33 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
uint8_t tmp_buf[STEP_SIZE];
memset(tmp_buf, 0x20, STEP_SIZE);
memcpy(tmp_buf, buf + idx, len - idx);
find_structural_bits_128(&tmp_buf[0], idx, base_ptr,
prev_escaped, prev_in_string, prev_primitive,
structurals, unescaped_chars_error, utf8_state);
this->scan_step(&tmp_buf[0], idx, utf8_checker);
idx += STEP_SIZE;
}
/* finally, flatten out the remaining structurals from the last iteration */
flatten_bits(base_ptr, idx, structurals);
this->structural_indexes.write_indexes(idx-64, this->prev_structurals);
}
simdjson::ErrorValues error = detect_errors_on_eof(unescaped_chars_error, prev_in_string);
};
int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &pj) {
if (unlikely(len > pj.byte_capacity)) {
std::cerr << "Your ParsedJson object only supports documents up to "
<< pj.byte_capacity << " bytes but you are trying to process "
<< len << " bytes" << std::endl;
return simdjson::CAPACITY;
}
utf8_checker utf8_checker{};
json_structural_scanner scanner{pj.structural_indexes};
scanner.scan(buf, len, utf8_checker);
simdjson::ErrorValues error = scanner.detect_errors_on_eof();
if (unlikely(error != simdjson::SUCCESS)) {
return error;
}
pj.n_structural_indexes = base_ptr - pj.structural_indexes;
pj.n_structural_indexes = scanner.structural_indexes.tail - pj.structural_indexes;
/* a valid JSON file cannot have zero structural indexes - we should have
* found something */
if (unlikely(pj.n_structural_indexes == 0u)) {
@ -38611,7 +38583,7 @@ int find_structural_bits(const uint8_t *buf, size_t len, simdjson::ParsedJson &p
}
/* make it safe to dereference one beyond this array */
pj.structural_indexes[pj.n_structural_indexes] = 0;
return utf8_state.errors();
return utf8_checker.errors();
}
} // namespace westmere
@ -38635,7 +38607,7 @@ UNTARGET_REGION
namespace {
// for when clmul is unavailable
[[maybe_unused]] uint64_t portable_compute_quote_mask(uint64_t quote_bits) {
[[maybe_unused]] really_inline uint64_t portable_compute_quote_mask(uint64_t quote_bits) {
uint64_t quote_mask = quote_bits ^ (quote_bits << 1);
quote_mask = quote_mask ^ (quote_mask << 2);
quote_mask = quote_mask ^ (quote_mask << 4);
@ -40990,8 +40962,11 @@ bool ParsedJson::allocate_capacity(size_t len, size_t max_depth) {
uint32_t max_structures = ROUNDUP_N(len, 64) + 2 + 7;
structural_indexes = new (std::nothrow) uint32_t[max_structures];
// a pathological input like "[[[[..." would generate len tape elements, so
// need a capacity of len + 1
size_t local_tape_capacity = ROUNDUP_N(len + 1, 64);
// need a capacity of at least len + 1, but it is also possible to do
// worse with "[7,7,7,7,6,7,7,7,6,7,7,6,[7,7,7,7,6,7,7,7,6,7,7,6,7,7,7,7,7,7,6"
//where len + 1 tape elements are
// generated, see issue https://github.com/lemire/simdjson/issues/345
size_t local_tape_capacity = ROUNDUP_N(len + 2, 64);
// a document with only zero-length strings... could have len/3 string
// and we would need len/3 * 5 bytes on the string buffer
size_t local_string_capacity = ROUNDUP_N(5 * len / 3 + 32, 64);

View File

@ -1,4 +1,4 @@
/* auto-generated on Sun Oct 13 11:11:50 DST 2019. Do not edit! */
/* auto-generated on Sun Nov 3 14:09:55 STD 2019. Do not edit! */
/* begin file include/simdjson/simdjson_version.h */
// /include/simdjson/simdjson_version.h automatically generated by release.py,
// do not change by hand
@ -101,6 +101,10 @@ static inline int trailing_zeroes(uint64_t input_num) {
return static_cast<int>(_tzcnt_u64(input_num));
}
static inline uint64_t clear_lowest_bit(uint64_t input_num) {
return _blsr_u64(input_num);
}
static inline int leading_zeroes(uint64_t input_num) {
return static_cast<int>(_lzcnt_u64(input_num));
}
@ -139,6 +143,15 @@ static inline NO_SANITIZE_UNDEFINED int trailing_zeroes(uint64_t input_num) {
#endif
}
/* result might be undefined when input_num is zero */
static inline uint64_t clear_lowest_bit(uint64_t input_num) {
#ifdef __BMI__ // blsr is BMI1
return _blsr_u64(input_num);
#else
return input_num & (input_num-1);
#endif
}
/* result might be undefined when input_num is zero */
static inline int leading_zeroes(uint64_t input_num) {
#ifdef __BMI2__
@ -1781,6 +1794,9 @@ int json_parse_implementation(const uint8_t *buf, size_t len, ParsedJson &pj,
} // if(realloc_if_needed) {
int stage1_is_ok = simdjson::find_structural_bits<T>(buf, len, pj);
if (stage1_is_ok != simdjson::SUCCESS) {
if (reallocated) { // must free before we exit
aligned_free((void *)buf);
}
pj.error_code = stage1_is_ok;
return pj.error_code;
}

View File

@ -75,7 +75,7 @@ int json_parse_dispatch(const uint8_t *buf, size_t len, ParsedJson &pj,
return json_parse_ptr.load(std::memory_order_relaxed)(buf, len, pj, realloc);
}
std::atomic<json_parse_functype *> json_parse_ptr = &json_parse_dispatch;
std::atomic<json_parse_functype *> json_parse_ptr{&json_parse_dispatch};
WARN_UNUSED
ParsedJson build_parsed_json(const uint8_t *buf, size_t len,

View File

@ -6,6 +6,7 @@
#include "simdjson/portability.h"
#include "jsoncharutils.h"
#include <cmath>
#include <limits>
#ifdef JSON_TEST_NUMBERS // for unit testing
void found_invalid_number(const uint8_t *buf);
@ -15,7 +16,7 @@ void found_float(double result, const uint8_t *buf);
#endif
namespace simdjson {
// Allowable floating-point values range from
// Allowable floating-point values range
// std::numeric_limits<double>::lowest() to std::numeric_limits<double>::max(),
// so from -1.7976e308 all the way to 1.7975e308 in binary64. The lowest
// non-zero normal values is std::numeric_limits<double>::min() or