From 4623fe7ead015652d9d8f2dc6f03203c8d8bb2d6 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 31 Dec 2025 17:15:39 +0800 Subject: [PATCH 01/34] Fix CoreML backend batched evaluation bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add dedicated mask buffer to fix incorrect mask offset calculation in batched inference. The Swift code assumed mask buffer stride of H*W per batch element, but was receiving spatial input buffer with stride of numInputChannels*H*W, causing batch elements > 0 to read garbage data. Changes: - Add userInputMaskBuffer to InputBuffers with correct stride - Copy first channel of spatial input (mask) to dedicated buffer - Pass mask buffer to Swift instead of reusing spatial buffer Batched winrate error: 19% → 0.037% (now matches single evaluation) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.cpp | 649 ++++++++++++++++++++++++++++++ cpp/neuralnet/coremlbackend.h | 250 ++++++++++++ cpp/neuralnet/coremlbackend.swift | 447 ++++++++++++++++++++ 3 files changed, 1346 insertions(+) create mode 100644 cpp/neuralnet/coremlbackend.cpp create mode 100644 cpp/neuralnet/coremlbackend.h create mode 100644 cpp/neuralnet/coremlbackend.swift diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp new file mode 100644 index 000000000..c58e639c6 --- /dev/null +++ b/cpp/neuralnet/coremlbackend.cpp @@ -0,0 +1,649 @@ +#ifdef USE_COREML_BACKEND + +#include "../neuralnet/modelversion.h" +#include "../neuralnet/nneval.h" +#include "../neuralnet/nninputs.h" +#include "../neuralnet/nninterface.h" +#include "../neuralnet/coremlbackend.h" + +#include + +using namespace std; + +//------------------------------------------------------------------------------ +// LoadedModel implementation +//------------------------------------------------------------------------------ 
+ +LoadedModel::LoadedModel(const string& fileName, const string& expectedSha256) { + modelPath = fileName; + ModelDesc::loadFromFileMaybeGZipped(fileName, modelDesc, expectedSha256); +} + +//------------------------------------------------------------------------------ +// NeuralNet namespace - Global functions +//------------------------------------------------------------------------------ + +void NeuralNet::globalInitialize() { + // No global initialization needed for Core ML +} + +void NeuralNet::globalCleanup() { + // No global cleanup needed for Core ML +} + +LoadedModel* NeuralNet::loadModelFile(const string& file, const string& expectedSha256) { + LoadedModel* loadedModel = new LoadedModel(file, expectedSha256); + return loadedModel; +} + +void NeuralNet::freeLoadedModel(LoadedModel* loadedModel) { + delete loadedModel; +} + +const ModelDesc& NeuralNet::getModelDesc(const LoadedModel* loadedModel) { + return loadedModel->modelDesc; +} + +//------------------------------------------------------------------------------ +// ComputeContext implementation +//------------------------------------------------------------------------------ + +ComputeContext::ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode): +coremlContext(createCoreMLComputeContext(nnX, nnY)) { + this->useFP16Mode = useFP16Mode; + this->nnXLen = nnX; + this->nnYLen = nnY; + (void)useNHWCMode; +} + +ComputeContext::~ComputeContext() { +} + +ComputeContext* NeuralNet::createComputeContext( + const vector& gpuIdxs, + Logger* logger, + int nnXLen, + int nnYLen, + const string& openCLTunerFile, + const string& homeDataDirOverride, + bool openCLReTunePerBoardSize, + enabled_t useFP16Mode, + enabled_t useNHWCMode, + const LoadedModel* loadedModel) { + + (void)gpuIdxs; + (void)logger; + (void)openCLTunerFile; + (void)homeDataDirOverride; + (void)openCLReTunePerBoardSize; + (void)loadedModel; + + return new ComputeContext(nnXLen, nnYLen, useFP16Mode, useNHWCMode); +} + +void 
NeuralNet::freeComputeContext(ComputeContext* computeContext) { + delete computeContext; +} + +//------------------------------------------------------------------------------ +// ComputeHandle implementation +//------------------------------------------------------------------------------ + +static mutex computeHandleMutex; + +ComputeHandle::ComputeHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + bool inputsUseNHWC, + int gpuIdx, + int serverThreadIdx, + bool requireExactNNLen): +coremlHandle(createCoreMLComputeHandle( + swift::String(loadedModel->modelPath), + swift::String(loadedModel->modelDesc.sha256), + serverThreadIdx, + requireExactNNLen, + loadedModel->modelDesc.numInputChannels, + loadedModel->modelDesc.numInputGlobalChannels, + loadedModel->modelDesc.numInputMetaChannels, + loadedModel->modelDesc.policyHead.p2Conv.outChannels, + loadedModel->modelDesc.numValueChannels, + loadedModel->modelDesc.numScoreValueChannels, + loadedModel->modelDesc.numOwnershipChannels, + context->coremlContext)) { + + const ModelDesc* modelDesc = &loadedModel->modelDesc; + auto coremlContext = context->coremlContext; + + nnXLen = coremlContext.getNnXLen(); + nnYLen = coremlContext.getNnYLen(); + gpuIndex = gpuIdx; + version = modelDesc->modelVersion; + metaEncoderVersion = modelDesc->metaEncoderVersion; + this->inputsUseNHWC = inputsUseNHWC; + this->requireExactNNLen = requireExactNNLen; + + // Core ML uses FP16 by default + useFP16 = (context->useFP16Mode != enabled_t::False); +} + +ComputeHandle::~ComputeHandle() { +} + +ComputeHandle* NeuralNet::createComputeHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + Logger* logger, + int maxBatchSize, + bool requireExactNNLen, + bool inputsUseNHWC, + int gpuIdxForThisThread, + int serverThreadIdx) { + + (void)logger; + (void)maxBatchSize; + + int gpuIdx = (gpuIdxForThisThread == -1) ? 
0 : gpuIdxForThisThread; + ComputeHandle* handle = nullptr; + + { + lock_guard lock(computeHandleMutex); + handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx, requireExactNNLen); + } + + return handle; +} + +void NeuralNet::freeComputeHandle(ComputeHandle* handle) { + delete handle; +} + +bool NeuralNet::isUsingFP16(const ComputeHandle* handle) { + return handle->useFP16; +} + +//------------------------------------------------------------------------------ +// Device information +//------------------------------------------------------------------------------ + +void NeuralNet::printDevices() { + printCoreMLDevices(); +} + +//------------------------------------------------------------------------------ +// InputBuffers implementation +//------------------------------------------------------------------------------ + +InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int nnXLen, int nnYLen) { + const ModelDesc& m = loadedModel->modelDesc; + + maxBatchSize = maxBatchSz; + policyResultChannels = m.policyHead.p2Conv.outChannels; + + assert(((m.modelVersion < 16) || (policyResultChannels == 4)) && + ((m.modelVersion >= 16) || (m.modelVersion < 12) || (policyResultChannels == 2)) && + ((m.modelVersion >= 12) || (policyResultChannels == 1))); + + singleSpatialElts = (size_t)m.numInputChannels * nnXLen * nnYLen; + singleInputElts = (size_t)m.numInputChannels * nnXLen * nnYLen; + singleInputGlobalElts = (size_t)m.numInputGlobalChannels; + singleInputMetaElts = (size_t)m.numInputMetaChannels; + singlePolicyResultElts = (size_t)(nnXLen * nnYLen); + singlePolicyPassResultElts = 1; + singlePolicyProbsElts = (size_t)((nnXLen * nnYLen) + 1); + singleValueResultElts = (size_t)m.numValueChannels; + singleOwnershipResultElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; + singleOwnerMapElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; + singleScoreValuesResultElts = (size_t)m.numScoreValueChannels; + 
singleMaskElts = (size_t)nnXLen * nnYLen; + + assert(NNModelVersion::getNumSpatialFeatures(m.modelVersion) == m.numInputChannels); + assert(NNModelVersion::getNumGlobalFeatures(m.modelVersion) == m.numInputGlobalChannels); + assert(singleValueResultElts == 3); + + rowSpatialBufferElts = (size_t)maxBatchSz * singleSpatialElts; + userInputBufferElts = (size_t)maxBatchSize * singleInputElts; + userInputGlobalBufferElts = (size_t)maxBatchSize * singleInputGlobalElts; + userInputMetaBufferElts = (size_t)maxBatchSize * singleInputMetaElts; + policyResultBufferElts = (size_t)maxBatchSize * singlePolicyResultElts * policyResultChannels; + policyPassResultBufferElts = (size_t)maxBatchSize * singlePolicyPassResultElts * policyResultChannels; + policyProbsBufferElts = (size_t)maxBatchSize * singlePolicyProbsElts * policyResultChannels; + valueResultBufferElts = (size_t)maxBatchSize * singleValueResultElts; + ownershipResultBufferElts = (size_t)maxBatchSize * singleOwnershipResultElts; + ownerMapBufferElts = (size_t)maxBatchSz * singleOwnerMapElts; + scoreValuesResultBufferElts = (size_t)maxBatchSize * singleScoreValuesResultElts; + userInputMaskBufferElts = (size_t)maxBatchSize * singleMaskElts; + + rowSpatialBuffer = new float[rowSpatialBufferElts]; + userInputBuffer = new float[userInputBufferElts]; + memset(&userInputBuffer[0], 0, userInputBufferElts * sizeof(userInputBuffer[0])); + + userInputGlobalBuffer = new float[userInputGlobalBufferElts]; + userInputMetaBuffer = new float[userInputMetaBufferElts]; + policyResults = new float[policyResultBufferElts]; + policyPassResults = new float[policyPassResultBufferElts]; + policyProbsBuffer = new float[policyProbsBufferElts]; + valueResults = new float[valueResultBufferElts]; + ownershipResults = new float[ownershipResultBufferElts]; + ownerMapBuffer = new float[ownerMapBufferElts]; + scoreValuesResults = new float[scoreValuesResultBufferElts]; + userInputMaskBuffer = new float[userInputMaskBufferElts]; + 
memset(&userInputMaskBuffer[0], 0, userInputMaskBufferElts * sizeof(userInputMaskBuffer[0])); +} + +InputBuffers::~InputBuffers() { + delete[] rowSpatialBuffer; + delete[] userInputBuffer; + delete[] userInputGlobalBuffer; + delete[] userInputMetaBuffer; + delete[] policyResults; + delete[] policyPassResults; + delete[] policyProbsBuffer; + delete[] valueResults; + delete[] ownershipResults; + delete[] ownerMapBuffer; + delete[] scoreValuesResults; + delete[] userInputMaskBuffer; +} + +InputBuffers* NeuralNet::createInputBuffers(const LoadedModel* loadedModel, int maxBatchSize, int nnXLen, int nnYLen) { + return new InputBuffers(loadedModel, maxBatchSize, nnXLen, nnYLen); +} + +void NeuralNet::freeInputBuffers(InputBuffers* inputBuffers) { + delete inputBuffers; +} + +//------------------------------------------------------------------------------ +// CoreMLProcess namespace - Helper functions +//------------------------------------------------------------------------------ + +void CoreMLProcess::copyRowData(float* dest, const float* src, size_t numElements) { + copy(src, src + numElements, dest); +} + +void CoreMLProcess::convertNCHW( + float* rowSpatialInput, + const int C, + const int H, + const int W, + const bool inputsUseNHWC) { + + if ((!inputsUseNHWC) || (C <= 0) || (H <= 0) || (W <= 0)) { + return; + } + + const int totalSize = H * W * C; + + if (totalSize <= 1) + return; + + const int HW = H * W; + + auto get_nchw_target_index = [C, W, HW](int nhwc_index) -> int { + int c = nhwc_index % C; + int temp = nhwc_index / C; + int x = temp % W; + int y = temp / W; + return (c * HW) + (y * W) + x; + }; + + std::vector processed(totalSize, false); + + for (int i = 0; i < totalSize; ++i) { + if (processed[i]) + continue; + + int target_i = get_nchw_target_index(i); + + if (target_i == i) { + processed[i] = true; + continue; + } + + int current_idx = i; + float value_in_hand = rowSpatialInput[i]; + + while (true) { + int target_idx = 
get_nchw_target_index(current_idx); + float value_at_target = rowSpatialInput[target_idx]; + rowSpatialInput[target_idx] = value_in_hand; + processed[target_idx] = true; + value_in_hand = value_at_target; + current_idx = target_idx; + + if (current_idx == i) + break; + } + } +} + +void CoreMLProcess::processRowData(size_t row, ComputeHandle* gpuHandle, InputBuffers* inputBuffers, NNResultBuf** inputBufs) { + int nnXLen = gpuHandle->nnXLen; + int nnYLen = gpuHandle->nnYLen; + int numSpatialFeatures = NNModelVersion::getNumSpatialFeatures(gpuHandle->version); + + float* rowSpatialInput = &inputBuffers->userInputBuffer[inputBuffers->singleSpatialElts * row]; + float* rowGlobalInput = &inputBuffers->userInputGlobalBuffer[inputBuffers->singleInputGlobalElts * row]; + float* rowMetaInput = &inputBuffers->userInputMetaBuffer[inputBuffers->singleInputMetaElts * row]; + const float* rowGlobal = inputBufs[row]->rowGlobalBuf.data(); + const float* rowSpatial = inputBufs[row]->rowSpatialBuf.data(); + const float* rowMeta = inputBufs[row]->rowMetaBuf.data(); + + CoreMLProcess::copyRowData(rowGlobalInput, rowGlobal, inputBuffers->singleInputGlobalElts); + CoreMLProcess::copyRowData(rowMetaInput, rowMeta, inputBuffers->singleInputMetaElts); + + SymmetryHelpers::copyInputsWithSymmetry( + rowSpatial, + rowSpatialInput, + 1, + nnYLen, + nnXLen, + numSpatialFeatures, + gpuHandle->inputsUseNHWC, + inputBufs[row]->symmetry); + + CoreMLProcess::convertNCHW( + rowSpatialInput, + numSpatialFeatures, + nnYLen, + nnXLen, + gpuHandle->inputsUseNHWC); + + // Copy first channel of spatial input (mask) to dedicated mask buffer + // After NCHW conversion, the first nnXLen*nnYLen elements are the mask channel + float* rowMaskInput = &inputBuffers->userInputMaskBuffer[inputBuffers->singleMaskElts * row]; + copy(rowSpatialInput, rowSpatialInput + inputBuffers->singleMaskElts, rowMaskInput); +} + +float CoreMLProcess::policyOptimismCalc(const double policyOptimism, const float p, const float pOpt) { 
+ return p + ((pOpt - p) * policyOptimism); +} + +void CoreMLProcess::processOptimism( + InputBuffers* inputBuffers, + NNOutput* currentOutput, + const double policyOptimism, + size_t row) { + auto& buffers = *inputBuffers; + const auto singlePolicyResultElts = buffers.singlePolicyResultElts; + float* targetBuffer = &buffers.policyProbsBuffer[row * singlePolicyResultElts]; + float* policyOutputBuf = &buffers.policyResults[row * singlePolicyResultElts * buffers.policyResultChannels]; + + for(size_t i = 0; i < singlePolicyResultElts; ++i) { + const float p = policyOutputBuf[i]; + const float pOpt = policyOutputBuf[i + singlePolicyResultElts]; + targetBuffer[i] = CoreMLProcess::policyOptimismCalc(policyOptimism, p, pOpt); + } + + const auto p = buffers.policyPassResults[row * buffers.policyResultChannels]; + const auto pOpt = buffers.policyPassResults[row * buffers.policyResultChannels + 1]; + currentOutput->policyProbs[buffers.singlePolicyProbsElts - 1] = CoreMLProcess::policyOptimismCalc(policyOptimism, p, pOpt); +} + +void CoreMLProcess::processPolicy( + InputBuffers* inputBuffers, + NNOutput* currentOutput, + const ComputeHandle* gpuHandle, + NNResultBuf* inputBuf, + size_t row) { + auto& buffers = *inputBuffers; + float* targetBuffer = &buffers.policyResults[row * buffers.singlePolicyResultElts * buffers.policyResultChannels]; + const auto policyOptimism = inputBuf->policyOptimism; + + if(buffers.policyResultChannels == 1) { + currentOutput->policyProbs[buffers.singlePolicyProbsElts - 1] = + buffers.policyPassResults[row * buffers.policyResultChannels]; + } else { + CoreMLProcess::processOptimism(inputBuffers, currentOutput, policyOptimism, row); + targetBuffer = &buffers.policyProbsBuffer[row * buffers.singlePolicyResultElts]; + } + + SymmetryHelpers::copyOutputsWithSymmetry( + targetBuffer, currentOutput->policyProbs, 1, gpuHandle->nnYLen, gpuHandle->nnXLen, inputBuf->symmetry); +} + +void CoreMLProcess::processValue( + const InputBuffers* inputBuffers, + 
NNOutput* currentOutput, + const size_t row) { + const size_t singleValueResultElts = inputBuffers->singleValueResultElts; + assert(singleValueResultElts == 3); + const float* valueOutputBuf = &inputBuffers->valueResults[row * singleValueResultElts]; + currentOutput->whiteWinProb = valueOutputBuf[0]; + currentOutput->whiteLossProb = valueOutputBuf[1]; + currentOutput->whiteNoResultProb = valueOutputBuf[2]; +} + +void CoreMLProcess::processOwnership( + const InputBuffers* inputBuffers, + NNOutput* currentOutput, + const ComputeHandle* gpuHandle, + const int symmetry, + const size_t row) { + const int nnXLen = gpuHandle->nnXLen; + const int nnYLen = gpuHandle->nnYLen; + const size_t singleOwnershipResultElts = inputBuffers->singleOwnershipResultElts; + const size_t ownershipOutputBufOffset = row * singleOwnershipResultElts; + + if(currentOutput->whiteOwnerMap != nullptr) { + const float* ownershipOutputBuf = &inputBuffers->ownershipResults[ownershipOutputBufOffset]; + SymmetryHelpers::copyOutputsWithSymmetry( + ownershipOutputBuf, currentOutput->whiteOwnerMap, 1, nnYLen, nnXLen, symmetry); + } +} + +void CoreMLProcess::processScoreValues( + const InputBuffers* inputBuffers, + NNOutput* currentOutput, + const int modelVersion, + const size_t row) { + const size_t offset = row * inputBuffers->singleScoreValuesResultElts; + const float* currentScoreValueData = &inputBuffers->scoreValuesResults[offset]; + + if(modelVersion >= 9) { + size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; + assert(numScoreValueChannels == 6); + currentOutput->whiteScoreMean = currentScoreValueData[0]; + currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; + currentOutput->whiteLead = currentScoreValueData[2]; + currentOutput->varTimeLeft = currentScoreValueData[3]; + currentOutput->shorttermWinlossError = currentScoreValueData[4]; + currentOutput->shorttermScoreError = currentScoreValueData[5]; + } + else if(modelVersion >= 8) { + size_t numScoreValueChannels = 
inputBuffers->singleScoreValuesResultElts; + assert(numScoreValueChannels == 4); + currentOutput->whiteScoreMean = currentScoreValueData[0]; + currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; + currentOutput->whiteLead = currentScoreValueData[2]; + currentOutput->varTimeLeft = currentScoreValueData[3]; + currentOutput->shorttermWinlossError = 0; + currentOutput->shorttermScoreError = 0; + } + else if(modelVersion >= 4) { + size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; + assert(numScoreValueChannels == 2); + currentOutput->whiteScoreMean = currentScoreValueData[0]; + currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; + currentOutput->whiteLead = currentOutput->whiteScoreMean; + currentOutput->varTimeLeft = 0; + currentOutput->shorttermWinlossError = 0; + currentOutput->shorttermScoreError = 0; + } + else { + assert(modelVersion >= 3); + size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; + assert(numScoreValueChannels == 1); + currentOutput->whiteScoreMean = currentScoreValueData[0]; + currentOutput->whiteScoreMeanSq = currentOutput->whiteScoreMean * currentOutput->whiteScoreMean; + currentOutput->whiteLead = currentOutput->whiteScoreMean; + currentOutput->varTimeLeft = 0; + currentOutput->shorttermWinlossError = 0; + currentOutput->shorttermScoreError = 0; + } +} + +void CoreMLProcess::processRow( + size_t row, + const ComputeHandle* gpuHandle, + InputBuffers* inputBuffers, + NNResultBuf** inputBufs, + vector& outputs) { + NNOutput* currentOutput = outputs[row]; + assert(currentOutput->nnXLen == gpuHandle->nnXLen); + assert(currentOutput->nnYLen == gpuHandle->nnYLen); + CoreMLProcess::processPolicy(inputBuffers, currentOutput, gpuHandle, inputBufs[row], row); + CoreMLProcess::processValue(inputBuffers, currentOutput, row); + CoreMLProcess::processOwnership(inputBuffers, currentOutput, gpuHandle, inputBufs[row]->symmetry, row); + CoreMLProcess::processScoreValues(inputBuffers, currentOutput, 
gpuHandle->version, row); +} + +void CoreMLProcess::getCoreMLOutput( + ComputeHandle* gpuHandle, + InputBuffers* inputBuffers, + int numBatchEltsFilled, + NNResultBuf** inputBufs, + vector& outputs) { + assert(numBatchEltsFilled > 0); + + int batchSize = numBatchEltsFilled; + + assert(batchSize <= inputBuffers->maxBatchSize); + assert((NNModelVersion::getNumSpatialFeatures(gpuHandle->version) * gpuHandle->nnXLen * gpuHandle->nnYLen) <= (int)inputBuffers->singleInputElts); + assert(NNModelVersion::getNumGlobalFeatures(gpuHandle->version) == (int)inputBuffers->singleInputGlobalElts); + + if(gpuHandle->metaEncoderVersion > 0) { + assert(SGFMetadata::METADATA_INPUT_NUM_CHANNELS == (int)inputBuffers->singleInputMetaElts); + } + + assert(inputBuffers->singleValueResultElts == 3); + + for(int row = 0; row < batchSize; row++) { + CoreMLProcess::processRowData(row, gpuHandle, inputBuffers, inputBufs); + } + + auto coremlHandle = gpuHandle->coremlHandle; + assert(coremlHandle); + + // Call Core ML inference + // Mask buffer has correct stride (singleMaskElts = H*W per batch element) + // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) + coremlHandle.get().apply( + inputBuffers->userInputBuffer, + inputBuffers->userInputGlobalBuffer, + inputBuffers->userInputMetaBuffer, + inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride + inputBuffers->policyResults, + inputBuffers->policyPassResults, + inputBuffers->valueResults, + inputBuffers->scoreValuesResults, + inputBuffers->ownershipResults, + batchSize); + + for(int row = 0; row < batchSize; row++) { + CoreMLProcess::processRow(row, gpuHandle, inputBuffers, inputBufs, outputs); + } +} + +void NeuralNet::getOutput( + ComputeHandle* gpuHandle, + InputBuffers* inputBuffers, + int numBatchEltsFilled, + NNResultBuf** inputBufs, + vector& outputs) { + + CoreMLProcess::getCoreMLOutput(gpuHandle, inputBuffers, numBatchEltsFilled, inputBufs, outputs); +} + 
+//------------------------------------------------------------------------------ +// Test functions - not supported for Core ML backend +//------------------------------------------------------------------------------ + +bool NeuralNet::testEvaluateConv( + const ConvLayerDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + bool useFP16, + bool useNHWC, + const vector& inputBuffer, + vector& outputBuffer) { + (void)desc; + (void)batchSize; + (void)nnXLen; + (void)nnYLen; + (void)useFP16; + (void)useNHWC; + (void)inputBuffer; + (void)outputBuffer; + return false; +} + +bool NeuralNet::testEvaluateBatchNorm( + const BatchNormLayerDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + bool useFP16, + bool useNHWC, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + (void)desc; + (void)batchSize; + (void)nnXLen; + (void)nnYLen; + (void)useFP16; + (void)useNHWC; + (void)inputBuffer; + (void)maskBuffer; + (void)outputBuffer; + return false; +} + +bool NeuralNet::testEvaluateResidualBlock( + const ResidualBlockDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + bool useFP16, + bool useNHWC, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + (void)desc; + (void)batchSize; + (void)nnXLen; + (void)nnYLen; + (void)useFP16; + (void)useNHWC; + (void)inputBuffer; + (void)maskBuffer; + (void)outputBuffer; + return false; +} + +bool NeuralNet::testEvaluateGlobalPoolingResidualBlock( + const GlobalPoolingResidualBlockDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + bool useFP16, + bool useNHWC, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + (void)desc; + (void)batchSize; + (void)nnXLen; + (void)nnYLen; + (void)useFP16; + (void)useNHWC; + (void)inputBuffer; + (void)maskBuffer; + (void)outputBuffer; + return false; +} + +#endif // USE_COREML_BACKEND diff --git a/cpp/neuralnet/coremlbackend.h b/cpp/neuralnet/coremlbackend.h new file mode 100644 index 
000000000..9e201550e --- /dev/null +++ b/cpp/neuralnet/coremlbackend.h @@ -0,0 +1,250 @@ +#pragma once + +#ifdef USE_COREML_BACKEND + +#include +#include "desc.h" +#include "../core/commontypes.h" +#include "../neuralnet/modelversion.h" +#include "../neuralnet/nneval.h" +#include "../neuralnet/nninputs.h" +#include "../neuralnet/nninterface.h" +#include + +using namespace std; +using namespace KataGoCoreML; + +namespace CoreMLProcess { + +void copyRowData(float* dest, const float* src, size_t numElements); +void convertNCHW(float* rowSpatialInput, int C, int H, int W, bool inputsUseNHWC); +void processRowData(size_t row, ComputeHandle* gpuHandle, InputBuffers* inputBuffers, NNResultBuf** inputBufs); +float policyOptimismCalc(const double policyOptimism, const float p, const float pOpt); +void processOptimism(InputBuffers* inputBuffers, NNOutput* currentOutput, const double policyOptimism, size_t row); + +void processPolicy(InputBuffers* inputBuffers, + NNOutput* currentOutput, + const ComputeHandle* gpuHandle, + NNResultBuf* inputBuf, + size_t row); + +void processValue(const InputBuffers* inputBuffers, NNOutput* currentOutput, const size_t row); + +void processOwnership(const InputBuffers* inputBuffers, + NNOutput* currentOutput, + const ComputeHandle* gpuHandle, + const int symmetry, + const size_t row); + +void +processScoreValues(const InputBuffers* inputBuffers, NNOutput* currentOutput, const int modelVersion, const size_t row); + +void processRow(size_t row, + const ComputeHandle* gpuHandle, + InputBuffers* inputBuffers, + NNResultBuf** inputBufs, + vector& outputs); + +void getCoreMLOutput(ComputeHandle* gpuHandle, + InputBuffers* inputBuffers, + int numBatchEltsFilled, + NNResultBuf** inputBufs, + vector& outputs); +}; + +/** + * @brief Represents a loaded neural network model. + * A LoadedModel object contains a ModelDesc object that describes the characteristics of the loaded model. 
+ * For Core ML backend, we also store the model path for on-demand conversion. + */ +struct LoadedModel { + /** + * @brief The description of the loaded model. + */ + ModelDesc modelDesc; + + /** + * @brief Path to the original .bin.gz model file for conversion. + */ + string modelPath; + + /** + * @brief Construct a new Loaded Model object + * This constructor loads a machine learning model from a file and sets the modelDesc field. + * @param fileName The name of the file containing the machine learning model. + * @param expectedSha256 The expected SHA-256 hash of the model file. + */ + LoadedModel(const string& fileName, const string& expectedSha256); + + LoadedModel() = delete; + LoadedModel(const LoadedModel&) = delete; + LoadedModel& operator=(const LoadedModel&) = delete; +}; + +/** + * @brief Context for computing neural network operations using Core ML. + * Contains global configuration settings for neural network computations. + */ +struct ComputeContext { + /** + * @brief Whether to use FP16 mode for computations. + */ + enabled_t useFP16Mode; + + /** + * @brief The width of the neural network input. + */ + int nnXLen; + + /** + * @brief The height of the neural network input. + */ + int nnYLen; + + /** + * @brief Core ML compute context instance from Swift. + */ + CoreMLComputeContext coremlContext; + + /** + * @brief Constructs a ComputeContext object. + * @param nnX The width of the input tensor. + * @param nnY The height of the input tensor. + * @param useFP16Mode Whether to use half-precision floating-point (FP16) mode. + * @param useNHWCMode Whether to use the NHWC format for input tensors. + */ + ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode); + + ~ComputeContext(); + ComputeContext() = delete; + ComputeContext(const ComputeContext&) = delete; + ComputeContext& operator=(const ComputeContext&) = delete; +}; + +/** + * @brief A handle for performing neural network computations using Core ML. 
+ * This struct represents a per-thread handle for computing neural network operations. + */ +struct ComputeHandle { + /** + * @brief The x length of the neural network. + */ + int nnXLen; + + /** + * @brief The y length of the neural network. + */ + int nnYLen; + + /** + * @brief The index of the GPU to use for computation. + */ + int gpuIndex; + + /** + * @brief The version of the loaded model. + */ + int version; + + /** + * @brief The version of the metadata encoder. + */ + int metaEncoderVersion; + + /** + * @brief Whether the input data uses NHWC format. + */ + bool inputsUseNHWC; + + /** + * @brief Whether to use 16-bit floating-point precision. + */ + bool useFP16; + + /** + * @brief Whether exact neural net length is required (enables mask optimization). + */ + bool requireExactNNLen; + + /** + * @brief The Core ML handle instance from Swift. + */ + swift::Optional coremlHandle; + + /** + * @brief Construct a new ComputeHandle object. + * @param context The ComputeContext object to use for computation. + * @param loadedModel A pointer to the LoadedModel object. + * @param inputsUseNHWC Whether the input data uses NHWC format. + * @param gpuIdx The index of the GPU to use. + * @param serverThreadIdx The index of the server thread. + * @param requireExactNNLen Whether exact NN length is required. + */ + ComputeHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + bool inputsUseNHWC, + int gpuIdx, + int serverThreadIdx, + bool requireExactNNLen); + + ~ComputeHandle(); + ComputeHandle() = delete; + ComputeHandle(const ComputeHandle&) = delete; + ComputeHandle& operator=(const ComputeHandle&) = delete; +}; + +/** + * @brief Input and output buffers for neural network inference. 
+ */ +struct InputBuffers { + int maxBatchSize; + size_t policyResultChannels; + + size_t singleSpatialElts; + size_t singleInputElts; + size_t singleInputGlobalElts; + size_t singleInputMetaElts; + size_t singlePolicyResultElts; + size_t singlePolicyPassResultElts; + size_t singlePolicyProbsElts; + size_t singleValueResultElts; + size_t singleOwnershipResultElts; + size_t singleOwnerMapElts; + size_t singleScoreValuesResultElts; + size_t singleMaskElts; + + size_t rowSpatialBufferElts; + size_t userInputBufferElts; + size_t userInputGlobalBufferElts; + size_t userInputMetaBufferElts; + size_t policyResultBufferElts; + size_t policyPassResultBufferElts; + size_t policyProbsBufferElts; + size_t valueResultBufferElts; + size_t ownershipResultBufferElts; + size_t ownerMapBufferElts; + size_t scoreValuesResultBufferElts; + size_t userInputMaskBufferElts; + + float* rowSpatialBuffer; + float* userInputBuffer; + float* userInputGlobalBuffer; + float* userInputMetaBuffer; + float* policyResults; + float* policyPassResults; + float* policyProbsBuffer; + float* valueResults; + float* ownershipResults; + float* ownerMapBuffer; + float* scoreValuesResults; + float* userInputMaskBuffer; + + InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int nnXLen, int nnYLen); + ~InputBuffers(); + InputBuffers() = delete; + InputBuffers(const InputBuffers&) = delete; + InputBuffers& operator=(const InputBuffers&) = delete; +}; + +#endif // USE_COREML_BACKEND diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift new file mode 100644 index 000000000..c0412272c --- /dev/null +++ b/cpp/neuralnet/coremlbackend.swift @@ -0,0 +1,447 @@ +import Foundation +import CoreML + +/// A class that handles output to standard error. +class StandardError: TextOutputStream { + func write(_ string: String) { + try? 
FileHandle.standardError.write(contentsOf: Data(string.utf8)) + } +} + +/// Print to standard error +func printError(_ item: Any) { + var instance = StandardError() + print(item, to: &instance) +} + +/// Manages caching of converted Core ML models +struct ModelCacheManager { + /// Cache directory using documents directory + static var cacheDirectory: URL { + FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] + .appendingPathComponent("KataGo") + .appendingPathComponent("CoreMLModels") + } + + /// Generate cache key from model SHA256 hash and board dimensions + static func cacheKey(modelSHA256: String, boardX: Int32, boardY: Int32, + eliminateMask: Bool) -> String { + return "\(modelSHA256)_\(boardX)x\(boardY)_\(eliminateMask ? "nomask" : "mask")" + } + + /// Get cached model path if it exists + static func getCachedModelPath(key: String) -> URL? { + let path = cacheDirectory.appendingPathComponent("\(key).mlpackage") + return FileManager.default.fileExists(atPath: path.path) ? path : nil + } + + /// Get destination path for new cached model + static func getModelCachePath(key: String) -> URL { + try? 
FileManager.default.createDirectory( + at: cacheDirectory, + withIntermediateDirectories: true) + return cacheDirectory.appendingPathComponent("\(key).mlpackage") + } +} + +/// Converts KataGo .bin.gz model to Core ML .mlpackage using Python +struct CoreMLConverter { + + enum ConversionError: Error { + case pythonNotFound + case conversionFailed(String) + case modelLoadFailed(String) + } + + /// Convert model using KataGoCoremltools + static func convert( + modelPath: String, + outputPath: URL, + boardXSize: Int32, + boardYSize: Int32, + optimizeIdentityMask: Bool + ) throws { + // Escape the paths for Python string + let escapedModelPath = modelPath.replacingOccurrences(of: "'", with: "\\'") + let escapedOutputPath = outputPath.path.replacingOccurrences(of: "'", with: "\\'") + + let pythonScript = """ + import coremltools as ct + mlmodel = ct.converters.katago.convert( + '\(escapedModelPath)', + board_x_size=\(boardXSize), + board_y_size=\(boardYSize), + minimum_deployment_target=ct.target.iOS18, + compute_precision=ct.precision.FLOAT16, + optimize_identity_mask=\(optimizeIdentityMask ? "True" : "False") + ) + mlmodel.save('\(escapedOutputPath)') + """ + + let process = Process() + process.executableURL = URL(fileURLWithPath: "/Users/chinchangyang/Code/KataGoCoremltools/envs/KataGoCoremltools-py3.11/bin/python") + process.arguments = ["-c", pythonScript] + + let errorPipe = Pipe() + let outputPipe = Pipe() + process.standardError = errorPipe + process.standardOutput = outputPipe + + do { + try process.run() + } catch { + throw ConversionError.pythonNotFound + } + + process.waitUntilExit() + + if process.terminationStatus != 0 { + let errorData = errorPipe.fileHandleForReading.readDataToEndOfFile() + let errorString = String(data: errorData, encoding: .utf8) ?? 
"Unknown error" + throw ConversionError.conversionFailed(errorString) + } + } +} + +/// Context storing board dimensions and settings +public class CoreMLComputeContext { + public let nnXLen: Int32 + public let nnYLen: Int32 + + init(nnXLen: Int32, nnYLen: Int32) { + self.nnXLen = nnXLen + self.nnYLen = nnYLen + } +} + +/// Create a Core ML compute context +public func createCoreMLComputeContext( + nnXLen: Int32, + nnYLen: Int32 +) -> CoreMLComputeContext { + return CoreMLComputeContext(nnXLen: nnXLen, nnYLen: nnYLen) +} + +/// Handle that wraps the loaded MLModel for inference +public class CoreMLComputeHandle { + let model: MLModel + let nnXLen: Int32 + let nnYLen: Int32 + let optimizeIdentityMask: Bool + let numInputChannels: Int + let numInputGlobalChannels: Int + let numInputMetaChannels: Int + let numPolicyChannels: Int + let numValueChannels: Int + let numScoreValueChannels: Int + let numOwnershipChannels: Int + + /// Model input/output names matching KataGoCoremltools output + struct IONames { + static let spatialInput = "spatial_input" + static let globalInput = "global_input" + static let inputMask = "input_mask" + static let metaInput = "meta_input" + + static let policyOutput = "policy_p2_conv" + static let policyPassOutput = "policy_pass_mul2" + static let valueOutput = "value_v3_bias" + static let ownershipOutput = "value_ownership_conv" + static let scoreValueOutput = "value_sv3_bias" + } + + init(model: MLModel, nnXLen: Int32, nnYLen: Int32, + optimizeIdentityMask: Bool, + numInputChannels: Int, + numInputGlobalChannels: Int, + numInputMetaChannels: Int, + numPolicyChannels: Int, + numValueChannels: Int, + numScoreValueChannels: Int, + numOwnershipChannels: Int) { + self.model = model + self.nnXLen = nnXLen + self.nnYLen = nnYLen + self.optimizeIdentityMask = optimizeIdentityMask + self.numInputChannels = numInputChannels + self.numInputGlobalChannels = numInputGlobalChannels + self.numInputMetaChannels = numInputMetaChannels + 
self.numPolicyChannels = numPolicyChannels + self.numValueChannels = numValueChannels + self.numScoreValueChannels = numScoreValueChannels + self.numOwnershipChannels = numOwnershipChannels + } + + /// Run inference on a batch of inputs + public func apply( + spatialInput: UnsafeMutablePointer, + globalInput: UnsafeMutablePointer, + metaInput: UnsafeMutablePointer, + maskInput: UnsafeMutablePointer, + policy: UnsafeMutablePointer, + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer, + batchSize: Int + ) { + autoreleasepool { + // Process batch elements sequentially (Core ML optimized for batch=1) + for b in 0.., + globalInput: UnsafeMutablePointer, + metaInput: UnsafeMutablePointer, + maskInput: UnsafeMutablePointer, + policy: UnsafeMutablePointer, + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer + ) throws { + let spatialSize = Int(nnXLen) * Int(nnYLen) * numInputChannels + let spatialOffset = batchIndex * spatialSize + + // Create MLMultiArray for spatial input (1, C, H, W) + let spatialArray = try MLMultiArray( + shape: [1, NSNumber(value: numInputChannels), + NSNumber(value: nnYLen), NSNumber(value: nnXLen)], + dataType: .float32) + + // Copy spatial data + let spatialPtr = spatialArray.dataPointer.assumingMemoryBound(to: Float32.self) + for i in 0.. 
0 { + let metaArray = try MLMultiArray( + shape: [1, NSNumber(value: numInputMetaChannels)], + dataType: .float32) + let metaPtr = metaArray.dataPointer.assumingMemoryBound(to: Float32.self) + let metaOffset = batchIndex * numInputMetaChannels + for i in 0.., + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer + ) { + // Extract policy output (1, policyChannels, H, W) + if let policyArray = prediction.featureValue(for: IONames.policyOutput)?.multiArrayValue { + let policySize = Int(nnXLen) * Int(nnYLen) * numPolicyChannels + let policyPtr = policyArray.dataPointer.assumingMemoryBound(to: Float32.self) + let policyOffset = batchIndex * policySize + for i in 0.. CoreMLComputeHandle? { + + // TODO: Enable mask optimization to test ~6.5% speedup + let optimizeMask = requireExactNNLen // When true: skips internal mask operations (~6.5% speedup) + // let optimizeMask = false // Set to true to skip internal mask operations (input still required) + let cacheKey = ModelCacheManager.cacheKey( + modelSHA256: modelSHA256, + boardX: context.nnXLen, + boardY: context.nnYLen, + eliminateMask: optimizeMask) + + // Check cache first + var mlpackagePath: URL + if let cachedPath = ModelCacheManager.getCachedModelPath(key: cacheKey) { + printError("Core ML backend \(serverThreadIdx): Using cached model at \(cachedPath.path)") + mlpackagePath = cachedPath + } else { + // Convert model + mlpackagePath = ModelCacheManager.getModelCachePath(key: cacheKey) + printError("Core ML backend \(serverThreadIdx): Converting model to \(mlpackagePath.path)") + + do { + try CoreMLConverter.convert( + modelPath: modelPath, + outputPath: mlpackagePath, + boardXSize: context.nnXLen, + boardYSize: context.nnYLen, + optimizeIdentityMask: optimizeMask + ) + printError("Core ML backend \(serverThreadIdx): Conversion completed successfully") + } catch CoreMLConverter.ConversionError.pythonNotFound { + printError("Core ML 
backend: Python3 not found at /usr/bin/python3") + return nil + } catch CoreMLConverter.ConversionError.conversionFailed(let error) { + printError("Core ML backend: Conversion failed: \(error)") + return nil + } catch { + printError("Core ML backend: Conversion failed: \(error)") + return nil + } + } + + // Load Core ML model + do { + let config = MLModelConfiguration() + config.computeUnits = .all // Use Neural Engine + GPU + CPU + + printError("Core ML backend \(serverThreadIdx): Compiling model...") + let compiledURL = try MLModel.compileModel(at: mlpackagePath) + + printError("Core ML backend \(serverThreadIdx): Loading compiled model...") + let model = try MLModel(contentsOf: compiledURL, configuration: config) + + printError("Core ML backend \(serverThreadIdx): Model loaded successfully, \(context.nnXLen)x\(context.nnYLen)") + + return CoreMLComputeHandle( + model: model, + nnXLen: context.nnXLen, + nnYLen: context.nnYLen, + optimizeIdentityMask: optimizeMask, + numInputChannels: Int(numInputChannels), + numInputGlobalChannels: Int(numInputGlobalChannels), + numInputMetaChannels: Int(numInputMetaChannels), + numPolicyChannels: Int(numPolicyChannels), + numValueChannels: Int(numValueChannels), + numScoreValueChannels: Int(numScoreValueChannels), + numOwnershipChannels: Int(numOwnershipChannels) + ) + } catch { + printError("Core ML backend: Failed to load model: \(error)") + return nil + } +} + +/// Print available Core ML compute units +public func printCoreMLDevices() { + printError("Core ML backend: Using Apple Neural Engine + GPU + CPU acceleration") +} From 6a67e80a3ad0e210c4274cd59e769b13785d69c2 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 31 Dec 2025 17:17:43 +0800 Subject: [PATCH 02/34] Add Core ML backend support to CMake configuration This update enhances the CMake configuration to support the Core ML backend alongside existing options. 
Key changes include: - Updated project definition to include Core ML in backend options. - Added necessary checks for Swift compiler version and generator type. - Introduced a new library for Core ML and updated target properties. - Modified output messages to reflect the selected backend during runtime. This integration allows for improved compatibility and functionality when using Core ML for neural network evaluations. --- cpp/CMakeLists.txt | 38 ++++++++++++++++++++++++++++++++++++-- cpp/command/benchmark.cpp | 3 +++ cpp/main.cpp | 4 ++++ cpp/program/setup.cpp | 3 +++ 4 files changed, 46 insertions(+), 2 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 254d23233..c18d2d0bf 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.18.2) -if(USE_BACKEND STREQUAL "METAL") +if(USE_BACKEND STREQUAL "METAL" OR USE_BACKEND STREQUAL "COREML") project(katago LANGUAGES CXX Swift) else() project(katago) @@ -32,7 +32,7 @@ endif() set(BUILD_DISTRIBUTED 0 CACHE BOOL "Build with http support for contributing to distributed training") set(USE_BACKEND CACHE STRING "Neural net backend") string(TOUPPER "${USE_BACKEND}" USE_BACKEND) -set_property(CACHE USE_BACKEND PROPERTY STRINGS "" CUDA TENSORRT OPENCL EIGEN) +set_property(CACHE USE_BACKEND PROPERTY STRINGS "" CUDA TENSORRT OPENCL EIGEN METAL COREML) set(USE_TCMALLOC 0 CACHE BOOL "Use TCMalloc") set(NO_GIT_REVISION 0 CACHE BOOL "Disable embedding the git revision into the compiled exe") @@ -124,6 +124,34 @@ elseif(USE_BACKEND STREQUAL "METAL") set_target_properties(KataGoSwift PROPERTIES Swift_MODULE_NAME "KataGoSwift") target_compile_options(KataGoSwift PUBLIC "$<$:-cxx-interoperability-mode=default>") +elseif(USE_BACKEND STREQUAL "COREML") + message(STATUS "-DUSE_BACKEND=COREML, using Core ML backend.") + if(NOT "${CMAKE_GENERATOR}" STREQUAL "Ninja") + message(FATAL_ERROR "Bidirectional C++ Interop requires Ninja generator. 
Have ${CMAKE_GENERATOR}") + endif() + if("${CMAKE_Swift_COMPILER_VERSION}" VERSION_LESS 5.9) + message(FATAL_ERROR "Bidirectional C++ Interop requires Swift 5.9 or greater. Have ${CMAKE_Swift_COMPILER_VERSION}") + endif() + if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") + message(FATAL_ERROR "Project requires building with AppleClang. Have ${CMAKE_CXX_COMPILER_ID}") + endif() + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/external/macos/cmake/modules") + include(InitializeSwift) + include(AddSwift) + set(CMAKE_OSX_DEPLOYMENT_TARGET 13.0) + set(NEURALNET_BACKEND_SOURCES + neuralnet/coremlbackend.cpp + ) + add_library(KataGoCoreML STATIC + neuralnet/coremlbackend.swift) + _swift_generate_cxx_header( + KataGoCoreML + "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoCoreML/KataGoCoreML-swift.h" + SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/neuralnet/coremlbackend.swift") + target_include_directories(KataGoCoreML PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include") + set_target_properties(KataGoCoreML PROPERTIES Swift_MODULE_NAME "KataGoCoreML") + target_compile_options(KataGoCoreML PUBLIC + "$<$:-cxx-interoperability-mode=default>") elseif(USE_BACKEND STREQUAL "OPENCL") message(STATUS "-DUSE_BACKEND=OPENCL, using OpenCL backend.") set(NEURALNET_BACKEND_SOURCES @@ -403,6 +431,12 @@ elseif(USE_BACKEND STREQUAL "METAL") if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") message(WARNING "You are currently running cmake on an Intel-based processor. It is known that running KataGo in this configuration may encounter performance issues. It is recommended to switch to a cmake version designed for ARM64 architecture for optimal performance.") endif() +elseif(USE_BACKEND STREQUAL "COREML") + target_compile_definitions(katago PRIVATE USE_COREML_BACKEND) + target_link_libraries(katago KataGoCoreML) + if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") + message(WARNING "You are currently running cmake on an Intel-based processor. Core ML backend may not work optimally. 
ARM64 architecture is recommended.") + endif() elseif(USE_BACKEND STREQUAL "OPENCL") target_compile_definitions(katago PRIVATE USE_OPENCL_BACKEND) find_package(OpenCL) diff --git a/cpp/command/benchmark.cpp b/cpp/command/benchmark.cpp index 3100fb1b1..b29b9325b 100644 --- a/cpp/command/benchmark.cpp +++ b/cpp/command/benchmark.cpp @@ -260,6 +260,9 @@ int MainCmds::benchmark(const vector& args) { #ifdef USE_METAL_BACKEND cout << "You are currently using the Metal version of KataGo." << endl; #endif +#ifdef USE_COREML_BACKEND + cout << "You are currently using the Core ML version of KataGo." << endl; +#endif #ifdef USE_OPENCL_BACKEND cout << "You are currently using the OpenCL version of KataGo." << endl; cout << "If you have a strong GPU capable of FP16 tensor cores (e.g. RTX2080), " diff --git a/cpp/main.cpp b/cpp/main.cpp index 0fcc36dea..c15703cd8 100644 --- a/cpp/main.cpp +++ b/cpp/main.cpp @@ -244,6 +244,8 @@ string Version::getKataGoVersionFullInfo() { out << "Using TensorRT backend" << endl; #elif defined(USE_METAL_BACKEND) out << "Using Metal backend" << endl; +#elif defined(USE_COREML_BACKEND) + out << "Using Core ML backend" << endl; #elif defined(USE_OPENCL_BACKEND) out << "Using OpenCL backend" << endl; #elif defined(USE_EIGEN_BACKEND) @@ -280,6 +282,8 @@ string Version::getGitRevisionWithBackend() { s += "-trt"; #elif defined(USE_METAL_BACKEND) s += "-metal"; +#elif defined(USE_COREML_BACKEND) + s += "-coreml"; #elif defined(USE_OPENCL_BACKEND) s += "-opencl"; #elif defined(USE_EIGEN_BACKEND) diff --git a/cpp/program/setup.cpp b/cpp/program/setup.cpp index a31295145..e271b3321 100644 --- a/cpp/program/setup.cpp +++ b/cpp/program/setup.cpp @@ -18,6 +18,7 @@ std::vector Setup::getBackendPrefixes() { prefixes.push_back("cuda"); prefixes.push_back("trt"); prefixes.push_back("metal"); + prefixes.push_back("coreml"); prefixes.push_back("opencl"); prefixes.push_back("eigen"); prefixes.push_back("dummybackend"); @@ -84,6 +85,8 @@ vector 
Setup::initializeNNEvaluators( string backendPrefix = "trt"; #elif defined(USE_METAL_BACKEND) string backendPrefix = "metal"; + #elif defined(USE_COREML_BACKEND) + string backendPrefix = "coreml"; #elif defined(USE_OPENCL_BACKEND) string backendPrefix = "opencl"; #elif defined(USE_EIGEN_BACKEND) From 1b1d2460e9e9d5df4b242ffcc77e3a685340ef82 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 31 Dec 2025 17:19:03 +0800 Subject: [PATCH 03/34] Add CoreML backend entry to .gitignore This update adds an entry for the CoreML backend to the .gitignore file, ensuring that generated files related to the CoreML integration are not tracked by Git. This change helps maintain a cleaner repository by excluding unnecessary build artifacts. --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 2e933d553..c4f067cbb 100644 --- a/.gitignore +++ b/.gitignore @@ -90,3 +90,6 @@ cpp/.ninja_log cpp/build.ninja cpp/KataGoSwift.* cpp/include/KataGoSwift/KataGoSwift-swift.h + +# For CoreML Backend +cpp/KataGoCoreML.* From 7e4ef7b34e56cdac92053ddc201d9d722f0ac085 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 31 Dec 2025 17:33:41 +0800 Subject: [PATCH 04/34] Fix CoreML backend MLMultiArray stride handling bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Core ML may return non-contiguous MLMultiArray outputs after GPU computation, especially for spatial tensors. The previous code used direct dataPointer access with linear indexing, which read data from wrong memory locations when strides were non-contiguous. This fix adds stride-aware extraction that checks MLMultiArray.strides and handles both contiguous (fast path) and non-contiguous (recursive copy) cases. Also fixes hard-coded passChannels=2 to use numPolicyChannels. 
Before: Policy KL Div ~9.19, Ownership Error ~54c After: Policy KL Div ~0.003, Ownership Error ~0.02c 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.swift | 120 +++++++++++++++++++++++------- 1 file changed, 93 insertions(+), 27 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index c0412272c..32d5859ce 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -294,6 +294,88 @@ public class CoreMLComputeHandle { ) } + /// Copy MLMultiArray data to destination buffer, respecting strides. + /// Core ML may return non-contiguous arrays, especially for spatial outputs after GPU computation. + private func copyMultiArray( + _ array: MLMultiArray, + to dest: UnsafeMutablePointer, + destOffset: Int + ) { + let shape = array.shape.map { $0.intValue } + let strides = array.strides.map { $0.intValue } + let ptr = array.dataPointer.assumingMemoryBound(to: Float32.self) + let totalElements = shape.reduce(1, *) + + // Check if contiguous (strides match expected for row-major C-order) + var isContiguous = true + var expectedStride = 1 + for i in (0.., + to dest: UnsafeMutablePointer, + destOffset: Int, + shape: [Int], + strides: [Int], + dim: Int, + srcOffset: Int, + destIdx: Int + ) -> Int { + var currentDestIdx = destIdx + + if dim == shape.count - 1 { + // Innermost dimension: copy elements + for i in 0.. ) { // Extract policy output (1, policyChannels, H, W) + // Must use stride-aware copy as Core ML may return non-contiguous arrays if let policyArray = prediction.featureValue(for: IONames.policyOutput)?.multiArrayValue { - let policySize = Int(nnXLen) * Int(nnYLen) * numPolicyChannels - let policyPtr = policyArray.dataPointer.assumingMemoryBound(to: Float32.self) - let policyOffset = batchIndex * policySize - for i in 0.. 
Date: Wed, 31 Dec 2025 17:38:51 +0800 Subject: [PATCH 05/34] Add KataGoCoreML-swift.h to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c4f067cbb..b509ec1ff 100644 --- a/.gitignore +++ b/.gitignore @@ -93,3 +93,4 @@ cpp/include/KataGoSwift/KataGoSwift-swift.h # For CoreML Backend cpp/KataGoCoreML.* +cpp/include/KataGoCoreML/KataGoCoreML-swift.h From 927af3bba740ce9ce298342fe5699f3d2e6c8256 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 31 Dec 2025 19:50:56 +0800 Subject: [PATCH 06/34] Fix CoreML backend pass policy output name mismatch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CoreML model exports pass policy as "policy_pass" but the code was looking for "policy_pass_mul2", causing the pass policy buffer to remain at 0. This resulted in systematically inflated pass move probabilities after softmax (up to 14% error vs reference). 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index 32d5859ce..25896cf98 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -144,7 +144,7 @@ public class CoreMLComputeHandle { static let metaInput = "meta_input" static let policyOutput = "policy_p2_conv" - static let policyPassOutput = "policy_pass_mul2" + static let policyPassOutput = "policy_pass" static let valueOutput = "value_v3_bias" static let ownershipOutput = "value_ownership_conv" static let scoreValueOutput = "value_sv3_bias" From 402e8d9020f18fe652fd19e7fcd9620f291213cc Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Thu, 1 Jan 2026 21:32:45 +0800 Subject: [PATCH 07/34] Add configurable FP16/FP32 precision to CoreML backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CoreML backend now respects the useFP16 config option, allowing users to choose between FP16 (default, faster, uses Neural Engine) and FP32 (higher precision). FP16 has ~0.87% max winrate error while FP32 achieves ~0.0006% by matching the Eigen reference. Cache keys include precision suffix to store FP16 and FP32 models separately. 
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.cpp | 2 +- cpp/neuralnet/coremlbackend.swift | 28 ++++++++++++++++++---------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index c58e639c6..9cf3ba62c 100644 --- a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -49,7 +49,7 @@ const ModelDesc& NeuralNet::getModelDesc(const LoadedModel* loadedModel) { //------------------------------------------------------------------------------ ComputeContext::ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode): -coremlContext(createCoreMLComputeContext(nnX, nnY)) { +coremlContext(createCoreMLComputeContext(nnX, nnY, useFP16Mode != enabled_t::False)) { this->useFP16Mode = useFP16Mode; this->nnXLen = nnX; this->nnYLen = nnY; diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index 25896cf98..f13846551 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -23,10 +23,11 @@ struct ModelCacheManager { .appendingPathComponent("CoreMLModels") } - /// Generate cache key from model SHA256 hash and board dimensions + /// Generate cache key from model SHA256 hash, board dimensions, and precision static func cacheKey(modelSHA256: String, boardX: Int32, boardY: Int32, - eliminateMask: Bool) -> String { - return "\(modelSHA256)_\(boardX)x\(boardY)_\(eliminateMask ? "nomask" : "mask")" + eliminateMask: Bool, useFP16: Bool) -> String { + let precisionSuffix = useFP16 ? "fp16" : "fp32" + return "\(modelSHA256)_\(boardX)x\(boardY)_\(precisionSuffix)_\(eliminateMask ? 
"nomask" : "mask")" } /// Get cached model path if it exists @@ -59,11 +60,13 @@ struct CoreMLConverter { outputPath: URL, boardXSize: Int32, boardYSize: Int32, - optimizeIdentityMask: Bool + optimizeIdentityMask: Bool, + useFP16: Bool ) throws { // Escape the paths for Python string let escapedModelPath = modelPath.replacingOccurrences(of: "'", with: "\\'") let escapedOutputPath = outputPath.path.replacingOccurrences(of: "'", with: "\\'") + let precisionStr = useFP16 ? "ct.precision.FLOAT16" : "ct.precision.FLOAT32" let pythonScript = """ import coremltools as ct @@ -72,7 +75,7 @@ struct CoreMLConverter { board_x_size=\(boardXSize), board_y_size=\(boardYSize), minimum_deployment_target=ct.target.iOS18, - compute_precision=ct.precision.FLOAT16, + compute_precision=\(precisionStr), optimize_identity_mask=\(optimizeIdentityMask ? "True" : "False") ) mlmodel.save('\(escapedOutputPath)') @@ -107,19 +110,22 @@ struct CoreMLConverter { public class CoreMLComputeContext { public let nnXLen: Int32 public let nnYLen: Int32 + public let useFP16: Bool - init(nnXLen: Int32, nnYLen: Int32) { + init(nnXLen: Int32, nnYLen: Int32, useFP16: Bool) { self.nnXLen = nnXLen self.nnYLen = nnYLen + self.useFP16 = useFP16 } } /// Create a Core ML compute context public func createCoreMLComputeContext( nnXLen: Int32, - nnYLen: Int32 + nnYLen: Int32, + useFP16: Bool ) -> CoreMLComputeContext { - return CoreMLComputeContext(nnXLen: nnXLen, nnYLen: nnYLen) + return CoreMLComputeContext(nnXLen: nnXLen, nnYLen: nnYLen, useFP16: useFP16) } /// Handle that wraps the loaded MLModel for inference @@ -442,7 +448,8 @@ public func createCoreMLComputeHandle( modelSHA256: modelSHA256, boardX: context.nnXLen, boardY: context.nnYLen, - eliminateMask: optimizeMask) + eliminateMask: optimizeMask, + useFP16: context.useFP16) // Check cache first var mlpackagePath: URL @@ -460,7 +467,8 @@ public func createCoreMLComputeHandle( outputPath: mlpackagePath, boardXSize: context.nnXLen, boardYSize: context.nnYLen, - 
optimizeIdentityMask: optimizeMask + optimizeIdentityMask: optimizeMask, + useFP16: context.useFP16 ) printError("Core ML backend \(serverThreadIdx): Conversion completed successfully") } catch CoreMLConverter.ConversionError.pythonNotFound { From 65c27b17c425229752ddfda0e5bf96b02e5dae8d Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 5 Jan 2026 14:01:29 +0800 Subject: [PATCH 08/34] Replace Python CoreML converter with native katagocoreml library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Eliminate Python dependency for CoreML model conversion by using the native C++ katagocoreml library instead of calling Python subprocess. Changes: - CMakeLists.txt: Add pkg-config detection for katagocoreml library - coremlbackend.cpp: Add CoreMLConversion namespace with native converter wrapper, caching logic, and directory management functions - coremlbackend.swift: Remove CoreMLConverter and ModelCacheManager structs, simplify createCoreMLComputeHandle to only load pre-converted models The native converter uses katagocoreml::KataGoConverter::convert() and caches converted models with a "_native" suffix to distinguish from previously Python-converted models. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/CMakeLists.txt | 9 +- cpp/neuralnet/coremlbackend.cpp | 171 +++++++++++++++++++++++++++--- cpp/neuralnet/coremlbackend.swift | 142 ++----------------------- 3 files changed, 170 insertions(+), 152 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index c18d2d0bf..9d764eb7b 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -135,6 +135,9 @@ elseif(USE_BACKEND STREQUAL "COREML") if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") message(FATAL_ERROR "Project requires building with AppleClang. 
Have ${CMAKE_CXX_COMPILER_ID}") endif() + # Find katagocoreml library for native Core ML model conversion using pkg-config + find_package(PkgConfig REQUIRED) + pkg_check_modules(KATAGOCOREML REQUIRED katagocoreml) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/external/macos/cmake/modules") include(InitializeSwift) include(AddSwift) @@ -433,7 +436,11 @@ elseif(USE_BACKEND STREQUAL "METAL") endif() elseif(USE_BACKEND STREQUAL "COREML") target_compile_definitions(katago PRIVATE USE_COREML_BACKEND) - target_link_libraries(katago KataGoCoreML) + target_include_directories(katago PRIVATE ${KATAGOCOREML_INCLUDE_DIRS}) + # Link katagocoreml with full path and add all required library directories + find_library(KATAGOCOREML_LIB katagocoreml HINTS /usr/local/lib REQUIRED) + target_link_directories(katago PRIVATE ${KATAGOCOREML_LIBRARY_DIRS}) + target_link_libraries(katago KataGoCoreML ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS}) if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") message(WARNING "You are currently running cmake on an Intel-based processor. Core ML backend may not work optimally. 
ARM64 architecture is recommended.") endif() diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index 9cf3ba62c..ab5fd17df 100644 --- a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -6,10 +6,123 @@ #include "../neuralnet/nninterface.h" #include "../neuralnet/coremlbackend.h" +#include #include +#include +#include +#include +#include using namespace std; +//------------------------------------------------------------------------------ +// CoreML Model Conversion - Native C++ using katagocoreml library +//------------------------------------------------------------------------------ + +namespace CoreMLConversion { + +// Get cache directory: ~/Documents/KataGo/CoreMLModels/ +static string getCacheDirectory() { + const char* homeDir = getenv("HOME"); + if(homeDir == nullptr) { + struct passwd* pw = getpwuid(getuid()); + homeDir = pw ? pw->pw_dir : "/tmp"; + } + return string(homeDir) + "/Documents/KataGo/CoreMLModels"; +} + +// Create directory if it doesn't exist +static bool createDirectoryIfNeeded(const string& path) { + struct stat st; + if(stat(path.c_str(), &st) == 0) { + return S_ISDIR(st.st_mode); + } + // Create parent directories recursively + size_t pos = path.find_last_of('/'); + if(pos != string::npos && pos > 0) { + createDirectoryIfNeeded(path.substr(0, pos)); + } + return mkdir(path.c_str(), 0755) == 0; +} + +// Check if file/directory exists +static bool fileExists(const string& path) { + struct stat st; + return stat(path.c_str(), &st) == 0; +} + +// Generate cache key with _native suffix to distinguish from Python-converted models +static string generateCacheKey( + const string& modelSHA256, + int boardX, + int boardY, + bool useFP16, + bool optimizeMask +) { + string precisionSuffix = useFP16 ? "fp16" : "fp32"; + string maskSuffix = optimizeMask ? 
"nomask" : "mask"; + return modelSHA256 + "_" + to_string(boardX) + "x" + to_string(boardY) + + "_" + precisionSuffix + "_" + maskSuffix + "_native"; +} + +// Convert KataGo model to CoreML using native katagocoreml library +static void convertModelToCoreML( + const string& modelPath, + const string& outputPath, + int boardXSize, + int boardYSize, + bool useFP16, + bool optimizeIdentityMask +) { + katagocoreml::ConversionOptions opts; + opts.board_x_size = boardXSize; + opts.board_y_size = boardYSize; + opts.compute_precision = useFP16 ? "FLOAT16" : "FLOAT32"; + opts.optimize_identity_mask = optimizeIdentityMask; + opts.specification_version = 8; // iOS 17+ / macOS 14+ + + katagocoreml::KataGoConverter::convert(modelPath, outputPath, opts); +} + +// Ensure model is converted and cached, returns path to .mlpackage +static string ensureModelConverted( + const string& modelPath, + const string& modelSHA256, + int boardX, + int boardY, + bool useFP16, + bool optimizeMask, + int serverThreadIdx +) { + string cacheDir = getCacheDirectory(); + string cacheKey = generateCacheKey(modelSHA256, boardX, boardY, useFP16, optimizeMask); + string mlpackagePath = cacheDir + "/" + cacheKey + ".mlpackage"; + + // Check if already cached + if(fileExists(mlpackagePath)) { + cerr << "Core ML backend " << serverThreadIdx << ": Using cached model at " << mlpackagePath << endl; + return mlpackagePath; + } + + // Create cache directory if needed + if(!createDirectoryIfNeeded(cacheDir)) { + throw runtime_error("Failed to create cache directory: " + cacheDir); + } + + // Convert model + cerr << "Core ML backend " << serverThreadIdx << ": Converting model to " << mlpackagePath << endl; + try { + convertModelToCoreML(modelPath, mlpackagePath, boardX, boardY, useFP16, optimizeMask); + cerr << "Core ML backend " << serverThreadIdx << ": Conversion completed successfully" << endl; + } catch(const exception& e) { + throw runtime_error(string("Core ML model conversion failed: ") + e.what()); + } + 
+ return mlpackagePath; +} + +} // namespace CoreMLConversion + //------------------------------------------------------------------------------ // LoadedModel implementation //------------------------------------------------------------------------------ @@ -91,6 +204,47 @@ void NeuralNet::freeComputeContext(ComputeContext* computeContext) { static mutex computeHandleMutex; +// Helper function to convert model and create CoreML handle +// This is needed because Swift Optional doesn't support assignment in C++ +static swift::Optional convertAndCreateHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int serverThreadIdx +) { + auto coremlContext = context->coremlContext; + int nnXLen = coremlContext.getNnXLen(); + int nnYLen = coremlContext.getNnYLen(); + bool useFP16 = (context->useFP16Mode != enabled_t::False); + bool optimizeMask = requireExactNNLen; + + // Convert model to CoreML format using native katagocoreml library + string coremlModelPath = CoreMLConversion::ensureModelConverted( + loadedModel->modelPath, + loadedModel->modelDesc.sha256, + nnXLen, + nnYLen, + useFP16, + optimizeMask, + serverThreadIdx + ); + + // Load the pre-converted CoreML model + return createCoreMLComputeHandle( + swift::String(coremlModelPath), + serverThreadIdx, + requireExactNNLen, + loadedModel->modelDesc.numInputChannels, + loadedModel->modelDesc.numInputGlobalChannels, + loadedModel->modelDesc.numInputMetaChannels, + loadedModel->modelDesc.policyHead.p2Conv.outChannels, + loadedModel->modelDesc.numValueChannels, + loadedModel->modelDesc.numScoreValueChannels, + loadedModel->modelDesc.numOwnershipChannels, + coremlContext + ); +} + ComputeHandle::ComputeHandle( ComputeContext* context, const LoadedModel* loadedModel, @@ -98,20 +252,7 @@ ComputeHandle::ComputeHandle( int gpuIdx, int serverThreadIdx, bool requireExactNNLen): -coremlHandle(createCoreMLComputeHandle( - swift::String(loadedModel->modelPath), - 
swift::String(loadedModel->modelDesc.sha256), - serverThreadIdx, - requireExactNNLen, - loadedModel->modelDesc.numInputChannels, - loadedModel->modelDesc.numInputGlobalChannels, - loadedModel->modelDesc.numInputMetaChannels, - loadedModel->modelDesc.policyHead.p2Conv.outChannels, - loadedModel->modelDesc.numValueChannels, - loadedModel->modelDesc.numScoreValueChannels, - loadedModel->modelDesc.numOwnershipChannels, - context->coremlContext)) { - +coremlHandle(convertAndCreateHandle(context, loadedModel, requireExactNNLen, serverThreadIdx)) { const ModelDesc* modelDesc = &loadedModel->modelDesc; auto coremlContext = context->coremlContext; @@ -122,8 +263,6 @@ coremlHandle(createCoreMLComputeHandle( metaEncoderVersion = modelDesc->metaEncoderVersion; this->inputsUseNHWC = inputsUseNHWC; this->requireExactNNLen = requireExactNNLen; - - // Core ML uses FP16 by default useFP16 = (context->useFP16Mode != enabled_t::False); } diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index f13846551..b899d99ac 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -14,97 +14,8 @@ func printError(_ item: Any) { print(item, to: &instance) } -/// Manages caching of converted Core ML models -struct ModelCacheManager { - /// Cache directory using documents directory - static var cacheDirectory: URL { - FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] - .appendingPathComponent("KataGo") - .appendingPathComponent("CoreMLModels") - } - - /// Generate cache key from model SHA256 hash, board dimensions, and precision - static func cacheKey(modelSHA256: String, boardX: Int32, boardY: Int32, - eliminateMask: Bool, useFP16: Bool) -> String { - let precisionSuffix = useFP16 ? "fp16" : "fp32" - return "\(modelSHA256)_\(boardX)x\(boardY)_\(precisionSuffix)_\(eliminateMask ? "nomask" : "mask")" - } - - /// Get cached model path if it exists - static func getCachedModelPath(key: String) -> URL? 
{ - let path = cacheDirectory.appendingPathComponent("\(key).mlpackage") - return FileManager.default.fileExists(atPath: path.path) ? path : nil - } - - /// Get destination path for new cached model - static func getModelCachePath(key: String) -> URL { - try? FileManager.default.createDirectory( - at: cacheDirectory, - withIntermediateDirectories: true) - return cacheDirectory.appendingPathComponent("\(key).mlpackage") - } -} - -/// Converts KataGo .bin.gz model to Core ML .mlpackage using Python -struct CoreMLConverter { - - enum ConversionError: Error { - case pythonNotFound - case conversionFailed(String) - case modelLoadFailed(String) - } - - /// Convert model using KataGoCoremltools - static func convert( - modelPath: String, - outputPath: URL, - boardXSize: Int32, - boardYSize: Int32, - optimizeIdentityMask: Bool, - useFP16: Bool - ) throws { - // Escape the paths for Python string - let escapedModelPath = modelPath.replacingOccurrences(of: "'", with: "\\'") - let escapedOutputPath = outputPath.path.replacingOccurrences(of: "'", with: "\\'") - let precisionStr = useFP16 ? "ct.precision.FLOAT16" : "ct.precision.FLOAT32" - - let pythonScript = """ - import coremltools as ct - mlmodel = ct.converters.katago.convert( - '\(escapedModelPath)', - board_x_size=\(boardXSize), - board_y_size=\(boardYSize), - minimum_deployment_target=ct.target.iOS18, - compute_precision=\(precisionStr), - optimize_identity_mask=\(optimizeIdentityMask ? 
"True" : "False") - ) - mlmodel.save('\(escapedOutputPath)') - """ - - let process = Process() - process.executableURL = URL(fileURLWithPath: "/Users/chinchangyang/Code/KataGoCoremltools/envs/KataGoCoremltools-py3.11/bin/python") - process.arguments = ["-c", pythonScript] - - let errorPipe = Pipe() - let outputPipe = Pipe() - process.standardError = errorPipe - process.standardOutput = outputPipe - - do { - try process.run() - } catch { - throw ConversionError.pythonNotFound - } - - process.waitUntilExit() - - if process.terminationStatus != 0 { - let errorData = errorPipe.fileHandleForReading.readDataToEndOfFile() - let errorString = String(data: errorData, encoding: .utf8) ?? "Unknown error" - throw ConversionError.conversionFailed(errorString) - } - } -} +// NOTE: Model caching and conversion are now handled in C++ using the native katagocoreml library. +// The Python-based CoreMLConverter and ModelCacheManager have been removed to eliminate Python dependency. /// Context storing board dimensions and settings public class CoreMLComputeContext { @@ -425,10 +336,10 @@ public class CoreMLComputeHandle { } } -/// Create compute handle - converts model if needed, loads Core ML model +/// Create compute handle - loads pre-converted Core ML model +/// Model conversion is now handled in C++ using the native katagocoreml library public func createCoreMLComputeHandle( - modelPath: String, - modelSHA256: String, + coremlModelPath: String, serverThreadIdx: Int, requireExactNNLen: Bool, numInputChannels: Int32, @@ -441,49 +352,10 @@ public func createCoreMLComputeHandle( context: CoreMLComputeContext ) -> CoreMLComputeHandle? 
{ - // TODO: Enable mask optimization to test ~6.5% speedup let optimizeMask = requireExactNNLen // When true: skips internal mask operations (~6.5% speedup) - // let optimizeMask = false // Set to true to skip internal mask operations (input still required) - let cacheKey = ModelCacheManager.cacheKey( - modelSHA256: modelSHA256, - boardX: context.nnXLen, - boardY: context.nnYLen, - eliminateMask: optimizeMask, - useFP16: context.useFP16) - - // Check cache first - var mlpackagePath: URL - if let cachedPath = ModelCacheManager.getCachedModelPath(key: cacheKey) { - printError("Core ML backend \(serverThreadIdx): Using cached model at \(cachedPath.path)") - mlpackagePath = cachedPath - } else { - // Convert model - mlpackagePath = ModelCacheManager.getModelCachePath(key: cacheKey) - printError("Core ML backend \(serverThreadIdx): Converting model to \(mlpackagePath.path)") - - do { - try CoreMLConverter.convert( - modelPath: modelPath, - outputPath: mlpackagePath, - boardXSize: context.nnXLen, - boardYSize: context.nnYLen, - optimizeIdentityMask: optimizeMask, - useFP16: context.useFP16 - ) - printError("Core ML backend \(serverThreadIdx): Conversion completed successfully") - } catch CoreMLConverter.ConversionError.pythonNotFound { - printError("Core ML backend: Python3 not found at /usr/bin/python3") - return nil - } catch CoreMLConverter.ConversionError.conversionFailed(let error) { - printError("Core ML backend: Conversion failed: \(error)") - return nil - } catch { - printError("Core ML backend: Conversion failed: \(error)") - return nil - } - } + let mlpackagePath = URL(fileURLWithPath: coremlModelPath) - // Load Core ML model + // Load Core ML model (already converted by C++ katagocoreml library) do { let config = MLModelConfiguration() config.computeUnits = .all // Use Neural Engine + GPU + CPU From e6fd90177b800d2a42ffd8eb7ffbb3545b95de96 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 5 Jan 2026 
20:00:37 +0800 Subject: [PATCH 09/34] Add hybrid CoreML + MPSGraph backend for improved throughput MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement a hybrid inference system that runs CoreML on CPU + Neural Engine and MPSGraph on GPU simultaneously, with adaptive batch sizing: - Add mpsgraphlayers.swift: Shared MPSGraph layer implementations - Add HybridComputeHandle: Dispatches work to both backends in parallel - Add ThroughputTracker: Adaptively adjusts batch split ratio using EMA - Parallelize CoreML batch processing with DispatchQueue.concurrentPerform - Optimize data copying with memcpy for inputs and outputs - Clean up CMakeLists.txt: Remove redundant SOURCES from _swift_generate_cxx_header Performance: Achieves 577 nnEvals/s at 16 threads (vs ~374 before), exceeding the 500 nnEvals/s target for CPU+GPU+ANE utilization. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/CMakeLists.txt | 14 +- cpp/neuralnet/coremlbackend.cpp | 289 +++- cpp/neuralnet/coremlbackend.h | 5 +- cpp/neuralnet/coremlbackend.swift | 500 +++++- cpp/neuralnet/mpsgraphlayers.swift | 2261 ++++++++++++++++++++++++++++ 5 files changed, 3030 insertions(+), 39 deletions(-) create mode 100644 cpp/neuralnet/mpsgraphlayers.swift diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 9d764eb7b..ab2b3dc69 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -118,8 +118,7 @@ elseif(USE_BACKEND STREQUAL "METAL") neuralnet/metalbackend.swift) _swift_generate_cxx_header( KataGoSwift - "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoSwift/KataGoSwift-swift.h" - SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/neuralnet/metalbackend.swift") + "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoSwift/KataGoSwift-swift.h") target_include_directories(KataGoSwift PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include") set_target_properties(KataGoSwift PROPERTIES Swift_MODULE_NAME "KataGoSwift") target_compile_options(KataGoSwift 
PUBLIC @@ -146,11 +145,11 @@ elseif(USE_BACKEND STREQUAL "COREML") neuralnet/coremlbackend.cpp ) add_library(KataGoCoreML STATIC - neuralnet/coremlbackend.swift) + neuralnet/coremlbackend.swift + neuralnet/mpsgraphlayers.swift) _swift_generate_cxx_header( KataGoCoreML - "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoCoreML/KataGoCoreML-swift.h" - SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/neuralnet/coremlbackend.swift") + "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoCoreML/KataGoCoreML-swift.h") target_include_directories(KataGoCoreML PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include") set_target_properties(KataGoCoreML PROPERTIES Swift_MODULE_NAME "KataGoCoreML") target_compile_options(KataGoCoreML PUBLIC @@ -440,7 +439,10 @@ elseif(USE_BACKEND STREQUAL "COREML") # Link katagocoreml with full path and add all required library directories find_library(KATAGOCOREML_LIB katagocoreml HINTS /usr/local/lib REQUIRED) target_link_directories(katago PRIVATE ${KATAGOCOREML_LIBRARY_DIRS}) - target_link_libraries(katago KataGoCoreML ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS}) + # Link MetalPerformanceShadersGraph for hybrid MPSGraph + CoreML backend + target_link_libraries(katago KataGoCoreML ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS} + "-framework MetalPerformanceShaders" + "-framework MetalPerformanceShadersGraph") if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") message(WARNING "You are currently running cmake on an Intel-based processor. Core ML backend may not work optimally. 
ARM64 architecture is recommended.") endif() diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index ab5fd17df..faf38363b 100644 --- a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -123,6 +123,273 @@ static string ensureModelConverted( } // namespace CoreMLConversion +//------------------------------------------------------------------------------ +// Model Descriptor Conversion - C++ to Swift types for MPSGraph +//------------------------------------------------------------------------------ + +namespace CoreMLProcess { + +/// Converts a ConvLayerDesc instance from C++ to Swift +SWConvLayerDesc convLayerDescToSwift(const ConvLayerDesc* desc) { + return createSWConvLayerDesc( + desc->convYSize, + desc->convXSize, + desc->inChannels, + desc->outChannels, + desc->dilationY, + desc->dilationX, + (float*)desc->weights.data()); +} + +/// Converts a BatchNormLayerDesc instance from C++ to Swift +SWBatchNormLayerDesc batchNormLayerDescToSwift(const BatchNormLayerDesc* desc) { + return createSWBatchNormLayerDesc( + desc->numChannels, + (float*)desc->mergedScale.data(), + (float*)desc->mergedBias.data()); +} + +/// Convert an activation layer description from C++ to Swift +ActivationKind activationLayerDescToSwift(const ActivationLayerDesc* desc) { + switch(desc->activation) { + case ACTIVATION_RELU: + return ActivationKind::relu(); + case ACTIVATION_MISH: + return ActivationKind::mish(); + case ACTIVATION_MISH_SCALE8: + return ActivationKind::identity(); // Metal/CoreML does not use scaled mish + case ACTIVATION_IDENTITY: + return ActivationKind::identity(); + default: + return ActivationKind::identity(); + } +} + +/// Convert a matrix multiplication layer description from C++ to Swift +SWMatMulLayerDesc matMulLayerDescToSwift(const MatMulLayerDesc* desc) { + return createSWMatMulLayerDesc( + desc->inChannels, + desc->outChannels, + (float*)desc->weights.data()); +} + +/// Convert a matrix bias layer description from 
C++ to Swift +SWMatBiasLayerDesc matBiasLayerDescToSwift(const MatBiasLayerDesc* desc) { + return createSWMatBiasLayerDesc(desc->numChannels, (float*)desc->weights.data()); +} + +/// Convert a residual block description from C++ to Swift +SWResidualBlockDesc residualBlockDescToSwift(const ResidualBlockDesc* desc) { + SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); + ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); + SWBatchNormLayerDesc midBN = batchNormLayerDescToSwift(&desc->midBN); + ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); + SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); + + return createSWResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + midBN, + midActivationKind, + finalConv); +} + +/// Convert a global pooling residual block description from C++ to Swift +SWGlobalPoolingResidualBlockDesc globalPoolingResidualBlockDescToSwift(const GlobalPoolingResidualBlockDesc* desc) { + SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); + ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); + SWConvLayerDesc gpoolConv = convLayerDescToSwift(&desc->gpoolConv); + SWBatchNormLayerDesc gpoolBN = batchNormLayerDescToSwift(&desc->gpoolBN); + ActivationKind gpoolActivationKind = activationLayerDescToSwift(&desc->gpoolActivation); + SWMatMulLayerDesc gpoolToBiasMul = matMulLayerDescToSwift(&desc->gpoolToBiasMul); + SWBatchNormLayerDesc midBN = batchNormLayerDescToSwift(&desc->midBN); + ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); + SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); + + return createSWGlobalPoolingResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + gpoolConv, + 
gpoolBN, + gpoolActivationKind, + gpoolToBiasMul, + midBN, + midActivationKind, + finalConv); +} + +// Forward declaration for mutual recursion +swift::Array residualBlocksToSwift(const vector>& blocks); + +/// Convert a nested bottleneck residual block description from C++ to Swift +SWNestedBottleneckResidualBlockDesc nestedBottleneckResidualBlockDescToSwift(const NestedBottleneckResidualBlockDesc* desc) { + SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); + ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc preConv = convLayerDescToSwift(&desc->preConv); + auto swBlocks = residualBlocksToSwift(desc->blocks); + SWBatchNormLayerDesc postBN = batchNormLayerDescToSwift(&desc->postBN); + ActivationKind postActivationKind = activationLayerDescToSwift(&desc->postActivation); + SWConvLayerDesc postConv = convLayerDescToSwift(&desc->postConv); + + return createSWNestedBottleneckResidualBlockDesc( + preBN, + preActivationKind, + preConv, + swBlocks, + postBN, + postActivationKind, + postConv); +} + +/// Convert residual blocks from C++ to Swift +swift::Array residualBlocksToSwift(const vector>& blocks) { + auto builder = createBlockDescriptorBuilder(); + + for(size_t i = 0; i < blocks.size(); i++) { + void* blockDesc = blocks[i].second.get(); + + if(blocks[i].first == GLOBAL_POOLING_BLOCK_KIND) { + BlockDescriptor descriptor = globalPoolingResidualBlockDescToSwift((GlobalPoolingResidualBlockDesc*)blockDesc); + builder.enque(descriptor); + } else if(blocks[i].first == NESTED_BOTTLENECK_BLOCK_KIND) { + BlockDescriptor descriptor = nestedBottleneckResidualBlockDescToSwift((NestedBottleneckResidualBlockDesc*)blockDesc); + builder.enque(descriptor); + } else { + BlockDescriptor descriptor = residualBlockDescToSwift((ResidualBlockDesc*)blockDesc); + builder.enque(descriptor); + } + } + + return builder.getBlockDescriptors(); +} + +/// Convert a SGF metadata encoder description from C++ to Swift 
+swift::Optional sGFMetadataEncoderDescToSwift(const SGFMetadataEncoderDesc* desc) { + SWMatMulLayerDesc mul1 = matMulLayerDescToSwift(&desc->mul1); + SWMatBiasLayerDesc bias1 = matBiasLayerDescToSwift(&desc->bias1); + ActivationKind act1 = activationLayerDescToSwift(&desc->act1); + SWMatMulLayerDesc mul2 = matMulLayerDescToSwift(&desc->mul2); + SWMatBiasLayerDesc bias2 = matBiasLayerDescToSwift(&desc->bias2); + ActivationKind act2 = activationLayerDescToSwift(&desc->act2); + SWMatMulLayerDesc mul3 = matMulLayerDescToSwift(&desc->mul3); + + return createSWSGFMetadataEncoderDesc( + desc->metaEncoderVersion, + desc->numInputMetaChannels, + mul1, + bias1, + act1, + mul2, + bias2, + act2, + mul3); +} + +/// Convert a trunk description from C++ to Swift +SWTrunkDesc trunkDescToSwift(const TrunkDesc* trunk) { + SWConvLayerDesc initialConv = convLayerDescToSwift(&trunk->initialConv); + SWMatMulLayerDesc initialMatMul = matMulLayerDescToSwift(&trunk->initialMatMul); + auto sgfMetadataEncoder = sGFMetadataEncoderDescToSwift(&trunk->sgfMetadataEncoder); + auto swBlocks = residualBlocksToSwift(trunk->blocks); + SWBatchNormLayerDesc trunkTipBN = batchNormLayerDescToSwift(&trunk->trunkTipBN); + ActivationKind trunkTipActivation = activationLayerDescToSwift(&trunk->trunkTipActivation); + + return createSWTrunkDesc( + trunk->modelVersion, + trunk->trunkNumChannels, + trunk->midNumChannels, + trunk->regularNumChannels, + trunk->gpoolNumChannels, + initialConv, + initialMatMul, + sgfMetadataEncoder, + swBlocks, + trunkTipBN, + trunkTipActivation); +} + +/// Convert a policy head description from C++ to Swift +SWPolicyHeadDesc policyHeadDescToSwift(const PolicyHeadDesc* policyHead) { + SWConvLayerDesc p1Conv = convLayerDescToSwift(&policyHead->p1Conv); + SWConvLayerDesc g1Conv = convLayerDescToSwift(&policyHead->g1Conv); + SWBatchNormLayerDesc g1BN = batchNormLayerDescToSwift(&policyHead->g1BN); + ActivationKind g1Activation = activationLayerDescToSwift(&policyHead->g1Activation); + 
SWMatMulLayerDesc gpoolToBiasMul = matMulLayerDescToSwift(&policyHead->gpoolToBiasMul); + SWBatchNormLayerDesc p1BN = batchNormLayerDescToSwift(&policyHead->p1BN); + ActivationKind p1Activation = activationLayerDescToSwift(&policyHead->p1Activation); + SWConvLayerDesc p2Conv = convLayerDescToSwift(&policyHead->p2Conv); + SWMatMulLayerDesc gpoolToPassMul = matMulLayerDescToSwift(&policyHead->gpoolToPassMul); + SWMatBiasLayerDesc gpoolToPassBias = matBiasLayerDescToSwift(&policyHead->gpoolToPassBias); + ActivationKind passActivation = activationLayerDescToSwift(&policyHead->passActivation); + SWMatMulLayerDesc gpoolToPassMul2 = matMulLayerDescToSwift(&policyHead->gpoolToPassMul2); + + return createSWPolicyHeadDesc( + policyHead->modelVersion, + p1Conv, + g1Conv, + g1BN, + g1Activation, + gpoolToBiasMul, + p1BN, + p1Activation, + p2Conv, + gpoolToPassMul, + gpoolToPassBias, + passActivation, + gpoolToPassMul2); +} + +/// Convert a value head description from C++ to Swift +SWValueHeadDesc valueHeadDescToSwift(const ValueHeadDesc* valueHead) { + SWConvLayerDesc v1Conv = convLayerDescToSwift(&valueHead->v1Conv); + SWBatchNormLayerDesc v1BN = batchNormLayerDescToSwift(&valueHead->v1BN); + ActivationKind v1Activation = activationLayerDescToSwift(&valueHead->v1Activation); + SWMatMulLayerDesc v2Mul = matMulLayerDescToSwift(&valueHead->v2Mul); + SWMatBiasLayerDesc v2Bias = matBiasLayerDescToSwift(&valueHead->v2Bias); + ActivationKind v2Activation = activationLayerDescToSwift(&valueHead->v2Activation); + SWMatMulLayerDesc v3Mul = matMulLayerDescToSwift(&valueHead->v3Mul); + SWMatBiasLayerDesc v3Bias = matBiasLayerDescToSwift(&valueHead->v3Bias); + SWMatMulLayerDesc sv3Mul = matMulLayerDescToSwift(&valueHead->sv3Mul); + SWMatBiasLayerDesc sv3Bias = matBiasLayerDescToSwift(&valueHead->sv3Bias); + SWConvLayerDesc vOwnershipConv = convLayerDescToSwift(&valueHead->vOwnershipConv); + + return createSWValueHeadDesc( + valueHead->modelVersion, + v1Conv, + v1BN, + v1Activation, + 
v2Mul, + v2Bias, + v2Activation, + v3Mul, + v3Bias, + sv3Mul, + sv3Bias, + vOwnershipConv); +} + +/// Convert a model description from C++ to Swift +SWModelDesc modelDescToSwift(const ModelDesc* modelDesc) { + return createSWModelDesc( + modelDesc->modelVersion, + swift::String(modelDesc->name), + modelDesc->numInputChannels, + modelDesc->numInputGlobalChannels, + modelDesc->numInputMetaChannels, + modelDesc->numValueChannels, + modelDesc->numScoreValueChannels, + modelDesc->numOwnershipChannels, + trunkDescToSwift(&modelDesc->trunk), + policyHeadDescToSwift(&modelDesc->policyHead), + valueHeadDescToSwift(&modelDesc->valueHead)); +} + +} // namespace CoreMLProcess + //------------------------------------------------------------------------------ // LoadedModel implementation //------------------------------------------------------------------------------ @@ -204,9 +471,9 @@ void NeuralNet::freeComputeContext(ComputeContext* computeContext) { static mutex computeHandleMutex; -// Helper function to convert model and create CoreML handle +// Helper function to convert model and create hybrid compute handle // This is needed because Swift Optional doesn't support assignment in C++ -static swift::Optional convertAndCreateHandle( +static swift::Optional convertAndCreateHybridHandle( ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, @@ -229,9 +496,13 @@ static swift::Optional convertAndCreateHandle serverThreadIdx ); - // Load the pre-converted CoreML model - return createCoreMLComputeHandle( + // Convert model descriptor to Swift format for MPSGraph path + SWModelDesc swModelDesc = CoreMLProcess::modelDescToSwift(&loadedModel->modelDesc); + + // Create hybrid compute handle (CoreML on CPU+ANE, MPSGraph on GPU) + return createHybridComputeHandle( swift::String(coremlModelPath), + swModelDesc, serverThreadIdx, requireExactNNLen, loadedModel->modelDesc.numInputChannels, @@ -252,7 +523,7 @@ ComputeHandle::ComputeHandle( int gpuIdx, int 
serverThreadIdx, bool requireExactNNLen): -coremlHandle(convertAndCreateHandle(context, loadedModel, requireExactNNLen, serverThreadIdx)) { +hybridHandle(convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, serverThreadIdx)) { const ModelDesc* modelDesc = &loadedModel->modelDesc; auto coremlContext = context->coremlContext; @@ -662,13 +933,13 @@ void CoreMLProcess::getCoreMLOutput( CoreMLProcess::processRowData(row, gpuHandle, inputBuffers, inputBufs); } - auto coremlHandle = gpuHandle->coremlHandle; - assert(coremlHandle); + auto hybridHandle = gpuHandle->hybridHandle; + assert(hybridHandle); - // Call Core ML inference + // Call hybrid inference (CoreML on CPU+ANE, MPSGraph on GPU) // Mask buffer has correct stride (singleMaskElts = H*W per batch element) // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) - coremlHandle.get().apply( + hybridHandle.get().apply( inputBuffers->userInputBuffer, inputBuffers->userInputGlobalBuffer, inputBuffers->userInputMetaBuffer, diff --git a/cpp/neuralnet/coremlbackend.h b/cpp/neuralnet/coremlbackend.h index 9e201550e..af52762cb 100644 --- a/cpp/neuralnet/coremlbackend.h +++ b/cpp/neuralnet/coremlbackend.h @@ -167,9 +167,10 @@ struct ComputeHandle { bool requireExactNNLen; /** - * @brief The Core ML handle instance from Swift. + * @brief The hybrid compute handle instance from Swift. + * This handle dispatches work to both CoreML (CPU+ANE) and MPSGraph (GPU). */ - swift::Optional coremlHandle; + swift::Optional hybridHandle; /** * @brief Construct a new ComputeHandle object. diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index b899d99ac..8c297aaca 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -1,5 +1,7 @@ import Foundation import CoreML +import MetalPerformanceShaders +import MetalPerformanceShadersGraph /// A class that handles output to standard error. 
class StandardError: TextOutputStream { @@ -102,9 +104,10 @@ public class CoreMLComputeHandle { ownership: UnsafeMutablePointer, batchSize: Int ) { - autoreleasepool { - // Process batch elements sequentially (Core ML optimized for batch=1) - for b in 0...size) // Create global input array (1, C) - rank 2 as expected by converter let globalArray = try MLMultiArray( @@ -158,9 +159,7 @@ public class CoreMLComputeHandle { dataType: .float32) let globalPtr = globalArray.dataPointer.assumingMemoryBound(to: Float32.self) let globalOffset = batchIndex * numInputGlobalChannels - for i in 0...size) // Build feature provider dictionary var inputDict: [String: MLFeatureValue] = [ @@ -177,9 +176,7 @@ public class CoreMLComputeHandle { let maskPtr = maskArray.dataPointer.assumingMemoryBound(to: Float32.self) let maskSize = Int(nnXLen) * Int(nnYLen) let maskOffset = batchIndex * maskSize - for i in 0...size) inputDict[IONames.inputMask] = MLFeatureValue(multiArray: maskArray) // Add meta input if model has it @@ -189,9 +186,7 @@ public class CoreMLComputeHandle { dataType: .float32) let metaPtr = metaArray.dataPointer.assumingMemoryBound(to: Float32.self) let metaOffset = batchIndex * numInputMetaChannels - for i in 0...size) inputDict[IONames.metaInput] = MLFeatureValue(multiArray: metaArray) } @@ -235,10 +230,8 @@ public class CoreMLComputeHandle { } if isContiguous { - // Fast path: direct memcpy-style copy - for i in 0...size) } else { // Slow path: copy with strides (handles non-contiguous layouts) copyWithStrides( @@ -358,7 +351,7 @@ public func createCoreMLComputeHandle( // Load Core ML model (already converted by C++ katagocoreml library) do { let config = MLModelConfiguration() - config.computeUnits = .all // Use Neural Engine + GPU + CPU + config.computeUnits = .cpuAndNeuralEngine // Exclude GPU for hybrid mode printError("Core ML backend \(serverThreadIdx): Compiling model...") let compiledURL = try MLModel.compileModel(at: mlpackagePath) @@ -389,5 +382,468 @@ public 
/// Print available Core ML compute units
public func printCoreMLDevices() {
  printError("Core ML backend: Hybrid mode - CoreML (CPU+ANE) + MPSGraph (GPU)")
}

// MARK: - Throughput Tracker for Adaptive Batch Sizing

/// Tracks throughput for the CoreML and MPSGraph paths to adaptively adjust
/// the batch split ratio. Thread-safe: all mutable state is guarded by `lock`.
/// NOTE: `NSLock` is non-recursive, so no method that holds the lock may call
/// another public accessor that also takes it (see `getStats`).
public class ThroughputTracker {
  private var coreMLSamplesPerSec: Double = 1.0
  private var mpsGraphSamplesPerSec: Double = 1.0
  private let alpha: Double = 0.3  // EMA smoothing factor (higher = faster adaptation)
  private let lock = NSLock()

  /// Update CoreML throughput measurement (exponential moving average).
  public func updateCoreML(samples: Int, duration: TimeInterval) {
    guard duration > 0, samples > 0 else { return }
    let newRate = Double(samples) / duration
    lock.lock()
    coreMLSamplesPerSec = alpha * newRate + (1 - alpha) * coreMLSamplesPerSec
    lock.unlock()
  }

  /// Update MPSGraph throughput measurement (exponential moving average).
  public func updateMPSGraph(samples: Int, duration: TimeInterval) {
    guard duration > 0, samples > 0 else { return }
    let newRate = Double(samples) / duration
    lock.lock()
    mpsGraphSamplesPerSec = alpha * newRate + (1 - alpha) * mpsGraphSamplesPerSec
    lock.unlock()
  }

  /// Ratio computation shared by the public accessors.
  /// Precondition: caller already holds `lock`.
  private func optimalCoreMLRatioLocked() -> Float {
    let total = coreMLSamplesPerSec + mpsGraphSamplesPerSec
    return total > 0 ? Float(coreMLSamplesPerSec / total) : 0.5
  }

  /// Get optimal CoreML ratio (0.0 to 1.0) based on measured throughput
  public func getOptimalCoreMLRatio() -> Float {
    lock.lock()
    defer { lock.unlock() }
    return optimalCoreMLRatioLocked()
  }

  /// Get current throughput stats for logging.
  /// BUGFIX: the original called getOptimalCoreMLRatio() while already holding
  /// `lock`; NSLock is non-recursive, so the first getStats() call deadlocked.
  /// Both accessors now delegate to the lock-free helper under a single lock.
  public func getStats() -> (coreML: Double, mpsGraph: Double, ratio: Float) {
    lock.lock()
    defer { lock.unlock() }
    return (coreMLSamplesPerSec, mpsGraphSamplesPerSec, optimalCoreMLRatioLocked())
  }
}

// MARK: - MPSGraph-based Model for GPU Inference

/// GPU-based model using MPSGraph for inference.
/// Builds the full KataGo graph (trunk + policy/value heads) once at init time;
/// `apply` feeds NCHW float32 host buffers and reads the head outputs back.
public class MPSGraphModelHandle {
  let device: MTLDevice
  let commandQueue: MTLCommandQueue
  let graph: MPSGraph
  let nnXLen: Int32
  let nnYLen: Int32
  let numInputChannels: Int
  let numInputGlobalChannels: Int
  let numInputMetaChannels: Int
  let numPolicyChannels: Int
  let numValueChannels: Int
  let numScoreValueChannels: Int
  let numOwnershipChannels: Int

  // Layers
  let input: InputLayer
  let inputGlobal: InputGlobalLayer
  let inputMeta: InputMetaLayer
  let mask: MaskLayer
  let trunk: Trunk
  let policyHead: PolicyHead
  let valueHead: ValueHead
  let targetTensors: [MPSGraphTensor]

  /// Fails (returns nil) when no Metal device or command queue is available.
  public init?(
    modelDesc: SWModelDesc,
    nnXLen: Int32,
    nnYLen: Int32
  ) {
    guard let device = MTLCreateSystemDefaultDevice() else {
      printError("MPSGraph backend: Failed to create Metal device")
      return nil
    }

    self.device = device
    guard let queue = device.makeCommandQueue() else {
      printError("MPSGraph backend: Failed to create command queue")
      return nil
    }
    self.commandQueue = queue
    self.graph = MPSGraph()
    self.nnXLen = nnXLen
    self.nnYLen = nnYLen
    self.numInputChannels = modelDesc.numInputChannels.intValue
    self.numInputGlobalChannels = modelDesc.numInputGlobalChannels.intValue
    self.numInputMetaChannels = modelDesc.numInputMetaChannels.intValue
    self.numPolicyChannels = 2  // Policy has 2 channels
    self.numValueChannels = modelDesc.numValueChannels.intValue
    self.numScoreValueChannels = modelDesc.numScoreValueChannels.intValue
    self.numOwnershipChannels = modelDesc.numOwnershipChannels.intValue

    let nnXLenNS = nnXLen as NSNumber
    let nnYLenNS = nnYLen as NSNumber

    // Placeholders (dynamic batch dimension) for the four model inputs.
    input = InputLayer(
      graph: graph,
      nnXLen: nnXLenNS,
      nnYLen: nnYLenNS,
      numChannels: modelDesc.numInputChannels)

    inputGlobal = InputGlobalLayer(
      graph: graph,
      numGlobalFeatures: modelDesc.numInputGlobalChannels)

    inputMeta = InputMetaLayer(
      graph: graph,
      numMetaFeatures: modelDesc.numInputMetaChannels)

    mask = MaskLayer(
      graph: graph,
      nnXLen: nnXLenNS,
      nnYLen: nnYLenNS)

    // Derived mask statistics consumed by pooling layers and the heads.
    let maskSum = MaskSumLayer(
      graph: graph,
      maskTensor: mask.tensor)

    let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer(
      graph: graph,
      maskSum: maskSum)

    let maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer(
      graph: graph,
      maskSumSqrtS14M01: maskSumSqrtS14M01)

    trunk = Trunk(
      graph: graph,
      descriptor: modelDesc.trunk,
      inputTensor: input.tensor,
      inputGlobalTensor: inputGlobal.tensor,
      inputMetaTensor: inputMeta.tensor,
      maskTensor: mask.tensor,
      maskSumTensor: maskSum.tensor,
      maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor,
      nnXLen: nnXLenNS,
      nnYLen: nnYLenNS)

    policyHead = PolicyHead(
      graph: graph,
      descriptor: modelDesc.policyHead,
      sourceTensor: trunk.resultTensor,
      maskTensor: mask.tensor,
      maskSumTensor: maskSum.tensor,
      maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor,
      nnXLen: nnXLenNS,
      nnYLen: nnYLenNS)

    valueHead = ValueHead(
      graph: graph,
      descriptor: modelDesc.valueHead,
      sourceTensor: trunk.resultTensor,
      maskTensor: mask.tensor,
      maskSumTensor: maskSum.tensor,
      maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor,
      maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01.tensor,
      nnXLen: nnXLenNS,
      nnYLen: nnYLenNS)

    targetTensors = [
      policyHead.policyTensor,
      policyHead.policyPassTensor,
      valueHead.valueTensor,
      valueHead.scoreValueTensor,
      valueHead.ownershipTensor,
    ]

    printError("MPSGraph backend: Initialized on \(device.name)")
  }

  /// Run inference on a batch using MPSGraph (GPU).
  /// All pointers are dense host buffers; input is NCHW with
  /// numInputChannels*H*W floats per batch element, and the mask is extracted
  /// from the first spatial input channel via strided writeBytes, so no
  /// separate mask buffer is needed on this path.
  public func apply(
    input inputPointer: UnsafeMutablePointer<Float32>,
    inputGlobal inputGlobalPointer: UnsafeMutablePointer<Float32>,
    inputMeta inputMetaPointer: UnsafeMutablePointer<Float32>,
    policy: UnsafeMutablePointer<Float32>,
    policyPass: UnsafeMutablePointer<Float32>,
    value: UnsafeMutablePointer<Float32>,
    scoreValue: UnsafeMutablePointer<Float32>,
    ownership: UnsafeMutablePointer<Float32>,
    batchSize: Int
  ) {
    let channelAxis = InputShape.getChannelAxis()
    let numInputChannels = input.shape[channelAxis]
    let nnXLenNS = nnXLen as NSNumber
    let nnYLenNS = nnYLen as NSNumber

    let inputShape = InputShape.create(
      batchSize: batchSize as NSNumber,
      numChannels: numInputChannels,
      nnYLen: nnYLenNS,
      nnXLen: nnXLenNS)

    let inputDescriptor = MPSNDArrayDescriptor(
      dataType: input.tensor.dataType,
      shape: inputShape)

    let inputArray = MPSNDArray(
      device: device,
      descriptor: inputDescriptor)

    inputArray.writeBytes(inputPointer)

    let numInputGlobalChannels = inputGlobal.shape[channelAxis]

    let inputGlobalShape = InputShape.create(
      batchSize: batchSize as NSNumber,
      numChannels: numInputGlobalChannels,
      nnYLen: 1,
      nnXLen: 1)

    let inputGlobalDescriptor = MPSNDArrayDescriptor(
      dataType: inputGlobal.tensor.dataType,
      shape: inputGlobalShape)

    let inputGlobalArray = MPSNDArray(
      device: device,
      descriptor: inputGlobalDescriptor)

    inputGlobalArray.writeBytes(inputGlobalPointer)

    let numInputMetaChannels = inputMeta.shape[channelAxis]

    let inputMetaShape = InputShape.create(
      batchSize: batchSize as NSNumber,
      numChannels: numInputMetaChannels,
      nnYLen: 1,
      nnXLen: 1)

    let inputMetaDescriptor = MPSNDArrayDescriptor(
      dataType: inputMeta.tensor.dataType,
      shape: inputMetaShape)

    let inputMetaArray = MPSNDArray(
      device: device,
      descriptor: inputMetaDescriptor)

    inputMetaArray.writeBytes(inputMetaPointer)

    let maskShape = InputShape.create(
      batchSize: batchSize as NSNumber,
      numChannels: 1,
      nnYLen: nnYLenNS,
      nnXLen: nnXLenNS)

    let maskDescriptor = MPSNDArrayDescriptor(
      dataType: mask.tensor.dataType,
      shape: maskShape)

    let maskArray = MPSNDArray(
      device: device,
      descriptor: maskDescriptor)

    // Extract mask from first channel of spatial input.
    // Strides (innermost to outermost): x, y, channel, batch — the batch
    // stride spans all numInputChannels planes so only channel 0 is read.
    var maskStrideArray = [
      MemoryLayout<Float32>.size,
      Int(nnXLen) * MemoryLayout<Float32>.size,
      Int(nnYLen) * Int(nnXLen) * MemoryLayout<Float32>.size,
      numInputChannels.intValue * Int(nnYLen) * Int(nnXLen) * MemoryLayout<Float32>.size,
    ]

    maskArray.writeBytes(inputPointer, strideBytes: &maskStrideArray)

    let feeds = [
      input.tensor: MPSGraphTensorData(inputArray),
      inputGlobal.tensor: MPSGraphTensorData(inputGlobalArray),
      inputMeta.tensor: MPSGraphTensorData(inputMetaArray),
      mask.tensor: MPSGraphTensorData(maskArray),
    ]

    // Synchronous run; results are copied back into the caller's buffers.
    let fetch = graph.run(
      with: commandQueue,
      feeds: feeds,
      targetTensors: targetTensors,
      targetOperations: nil)

    fetch[policyHead.policyTensor]?.mpsndarray().readBytes(policy)
    fetch[policyHead.policyPassTensor]?.mpsndarray().readBytes(policyPass)
    fetch[valueHead.valueTensor]?.mpsndarray().readBytes(value)
    fetch[valueHead.scoreValueTensor]?.mpsndarray().readBytes(scoreValue)
    fetch[valueHead.ownershipTensor]?.mpsndarray().readBytes(ownership)
  }
}

// MARK: - Hybrid Compute Handle

/// Hybrid compute handle that dispatches to both CoreML (CPU+ANE) and MPSGraph (GPU).
/// Each batch is split in two; the split point adapts to the measured
/// throughput of the two paths via `ThroughputTracker`.
public class HybridComputeHandle {
  let coremlHandle: CoreMLComputeHandle
  let mpsGraphHandle: MPSGraphModelHandle
  let throughputTracker: ThroughputTracker
  let coremlQueue: DispatchQueue
  let mpsGraphQueue: DispatchQueue
  let nnXLen: Int32
  let nnYLen: Int32

  public init(
    coremlHandle: CoreMLComputeHandle,
    mpsGraphHandle: MPSGraphModelHandle
  ) {
    self.coremlHandle = coremlHandle
    self.mpsGraphHandle = mpsGraphHandle
    self.throughputTracker = ThroughputTracker()
    self.coremlQueue = DispatchQueue(label: "com.katago.coreml", qos: .userInitiated)
    self.mpsGraphQueue = DispatchQueue(label: "com.katago.mpsgraph", qos: .userInitiated)
    self.nnXLen = coremlHandle.nnXLen
    self.nnYLen = coremlHandle.nnYLen
  }

  /// Run hybrid inference - splits batch between CoreML and MPSGraph.
  /// Blocks until both halves complete. When batchSize == 1 the whole batch
  /// goes to CoreML (coreMLBatchSize is clamped to at least 1).
  public func apply(
    spatialInput: UnsafeMutablePointer<Float32>,
    globalInput: UnsafeMutablePointer<Float32>,
    metaInput: UnsafeMutablePointer<Float32>,
    maskInput: UnsafeMutablePointer<Float32>,
    policy: UnsafeMutablePointer<Float32>,
    policyPass: UnsafeMutablePointer<Float32>,
    value: UnsafeMutablePointer<Float32>,
    scoreValue: UnsafeMutablePointer<Float32>,
    ownership: UnsafeMutablePointer<Float32>,
    batchSize: Int
  ) {
    // Get optimal split ratio based on throughput
    let ratio = throughputTracker.getOptimalCoreMLRatio()
    let coreMLBatchSize = max(1, min(batchSize - 1, Int(Float(batchSize) * ratio)))
    let mpsGraphBatchSize = batchSize - coreMLBatchSize

    // Per-batch-element buffer strides (floats). The unused mask stride was
    // removed; the MPSGraph path derives its mask from the spatial input.
    // NOTE(review): assumes policyPass holds numPolicyChannels floats per
    // batch element — mirrors the offset used below; confirm against C++ side.
    let spatialSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numInputChannels
    let globalSize = coremlHandle.numInputGlobalChannels
    let metaSize = coremlHandle.numInputMetaChannels
    let policySize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numPolicyChannels
    let valueSize = coremlHandle.numValueChannels
    let scoreValueSize = coremlHandle.numScoreValueChannels
    let ownershipSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numOwnershipChannels

    let group = DispatchGroup()

    // CoreML path (CPU + ANE) — consumes the first coreMLBatchSize elements.
    if coreMLBatchSize > 0 {
      group.enter()
      coremlQueue.async { [self] in
        let start = CFAbsoluteTimeGetCurrent()

        coremlHandle.apply(
          spatialInput: spatialInput,
          globalInput: globalInput,
          metaInput: metaInput,
          maskInput: maskInput,
          policy: policy,
          policyPass: policyPass,
          value: value,
          scoreValue: scoreValue,
          ownership: ownership,
          batchSize: coreMLBatchSize
        )

        let duration = CFAbsoluteTimeGetCurrent() - start
        throughputTracker.updateCoreML(samples: coreMLBatchSize, duration: duration)
        group.leave()
      }
    }

    // MPSGraph path (GPU) — consumes the remaining elements at an offset.
    if mpsGraphBatchSize > 0 {
      group.enter()
      mpsGraphQueue.async { [self] in
        let start = CFAbsoluteTimeGetCurrent()

        // Offset pointers for MPSGraph batch portion
        let spatialOffset = coreMLBatchSize * spatialSize
        let globalOffset = coreMLBatchSize * globalSize
        let metaOffset = coreMLBatchSize * metaSize
        let policyOffset = coreMLBatchSize * policySize
        let valueOffset = coreMLBatchSize * valueSize
        let scoreValueOffset = coreMLBatchSize * scoreValueSize
        let ownershipOffset = coreMLBatchSize * ownershipSize

        autoreleasepool {
          mpsGraphHandle.apply(
            input: spatialInput.advanced(by: spatialOffset),
            inputGlobal: globalInput.advanced(by: globalOffset),
            inputMeta: metaInput.advanced(by: metaOffset),
            policy: policy.advanced(by: policyOffset),
            policyPass: policyPass.advanced(by: coreMLBatchSize * coremlHandle.numPolicyChannels),
            value: value.advanced(by: valueOffset),
            scoreValue: scoreValue.advanced(by: scoreValueOffset),
            ownership: ownership.advanced(by: ownershipOffset),
            batchSize: mpsGraphBatchSize
          )
        }

        let duration = CFAbsoluteTimeGetCurrent() - start
        throughputTracker.updateMPSGraph(samples: mpsGraphBatchSize, duration: duration)
        group.leave()
      }
    }

    // Wait for both paths to complete
    group.wait()
  }
}

/// Create a hybrid compute handle.
/// Returns nil if either the CoreML handle or the MPSGraph handle fails to
/// initialize; errors are logged to stderr via printError.
public func createHybridComputeHandle(
  coremlModelPath: String,
  modelDesc: SWModelDesc,
  serverThreadIdx: Int,
  requireExactNNLen: Bool,
  numInputChannels: Int32,
  numInputGlobalChannels: Int32,
  numInputMetaChannels: Int32,
  numPolicyChannels: Int32,
  numValueChannels: Int32,
  numScoreValueChannels: Int32,
  numOwnershipChannels: Int32,
  context: CoreMLComputeContext
) -> HybridComputeHandle? {

  // Create CoreML handle (CPU + ANE)
  guard let coremlHandle = createCoreMLComputeHandle(
    coremlModelPath: coremlModelPath,
    serverThreadIdx: serverThreadIdx,
    requireExactNNLen: requireExactNNLen,
    numInputChannels: numInputChannels,
    numInputGlobalChannels: numInputGlobalChannels,
    numInputMetaChannels: numInputMetaChannels,
    numPolicyChannels: numPolicyChannels,
    numValueChannels: numValueChannels,
    numScoreValueChannels: numScoreValueChannels,
    numOwnershipChannels: numOwnershipChannels,
    context: context
  ) else {
    printError("Hybrid backend \(serverThreadIdx): Failed to create CoreML handle")
    return nil
  }

  // Create MPSGraph handle (GPU)
  guard let mpsGraphHandle = MPSGraphModelHandle(
    modelDesc: modelDesc,
    nnXLen: context.nnXLen,
    nnYLen: context.nnYLen
  ) else {
    printError("Hybrid backend \(serverThreadIdx): Failed to create MPSGraph handle")
    return nil
  }

  printError("Hybrid backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)")

  return HybridComputeHandle(
    coremlHandle: coremlHandle,
    mpsGraphHandle: mpsGraphHandle
  )
}
extension Data {
  /// Wraps an existing Float32 buffer as Data WITHOUT copying or taking
  /// ownership (`deallocator: .none`); the pointer must outlive this Data.
  /// Byte count is derived from the product of `shape` dimensions.
  init(
    floatsNoCopy: UnsafeMutablePointer<Float32>,
    shape: [NSNumber]
  ) {
    self.init(
      bytesNoCopy: floatsNoCopy,
      count: shape.countBytesOfFloat32(),
      deallocator: .none)
  }
}

/// Extension to MPSNDArray to read/write bytes from/to UnsafeMutableRawPointer
/// assuming a dense (nil-stride) layout.
extension MPSNDArray {
  /// Read bytes from the buffer
  func readBytes(_ buffer: UnsafeMutableRawPointer) {
    self.readBytes(buffer, strideBytes: nil)
  }

  /// Write bytes to the buffer
  func writeBytes(_ buffer: UnsafeMutableRawPointer) {
    self.writeBytes(buffer, strideBytes: nil)
  }
}

/// Extension to Array to count number of elements and bytes of a shape array
extension Array where Element == NSNumber {
  /// Count number of elements (product of all dimensions)
  func countElements() -> Int {
    return reduce(1, { $0 * $1.intValue })
  }

  /// Count number of bytes when each element is a Float32
  func countBytesOfFloat32() -> Int {
    return countElements() * MemoryLayout<Float32>.size
  }
}

/// Extension to MPSGraph for the mish activation function
extension MPSGraph {
  /// Mish activation: x * tanh(softplus(x))
  /// softplus is computed as log(1 + exp(min(x, 20))); above the threshold of
  /// 20, x itself is used in place of softplus(x) to avoid exp() overflow —
  /// the select picks log-form below threshold, raw x at/above it.
  func mish(tensor: MPSGraphTensor) -> MPSGraphTensor {
    assert(tensor.dataType == .float32)

    let one = 1.0
    let threshold = 20.0
    let thresholdTensor = constant(threshold, dataType: tensor.dataType)
    let minimumTensor = minimum(tensor, thresholdTensor, name: nil)
    let expTensor = exponent(with: minimumTensor, name: nil)
    let oneTensor = constant(one, dataType: tensor.dataType)
    let addTensor = addition(expTensor, oneTensor, name: nil)
    let logTensor = logarithm(with: addTensor, name: nil)
    let lessTensor = lessThan(tensor, thresholdTensor, name: nil)
    let selectTensor = select(
      predicate: lessTensor, trueTensor: logTensor, falseTensor: tensor, name: nil)
    let tanhTensor = tanh(with: selectTensor, name: nil)
    let mulTensor = multiplication(tensor, tanhTensor, name: nil)

    return mulTensor
  }
}
/// A structure that represents the input shape (internal - not exposed to C++)
struct InputShape {
  /// Create an NCHW shape for an input tensor.
  /// A batchSize of -1 denotes a dynamic batch dimension (placeholders).
  static func create(
    batchSize: NSNumber,
    numChannels: NSNumber,
    nnYLen: NSNumber,
    nnXLen: NSNumber
  ) -> [NSNumber] {
    return [batchSize, numChannels, nnYLen, nnXLen]
  }

  /// Get the channel axis (axis 1 in NCHW)
  static func getChannelAxis() -> Int {
    return 1
  }

  /// Get the HW axes (axes 2 and 3 in NCHW)
  static func getHWAxes() -> [NSNumber] {
    return [2, 3] as [NSNumber]
  }
}

// MARK: - Input Layers

/// The spatial board-feature input: a dynamic-batch NCHW placeholder of
/// numChannels x nnYLen x nnXLen float32 values.
struct InputLayer {
  let tensor: MPSGraphTensor
  let shape: [NSNumber]

  init(
    graph: MPSGraph,
    nnXLen: NSNumber,
    nnYLen: NSNumber,
    numChannels: NSNumber,
    dataType: MPSDataType = .float32
  ) {
    shape = InputShape.create(
      batchSize: -1,
      numChannels: numChannels,
      nnYLen: nnYLen,
      nnXLen: nnXLen)

    self.tensor = graph.placeholder(
      shape: shape,
      dataType: dataType,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}

/// The global-feature input: a dynamic-batch placeholder shaped (N, C, 1, 1)
/// so it broadcasts against spatial tensors.
struct InputGlobalLayer {
  let tensor: MPSGraphTensor
  let shape: [NSNumber]

  init(
    graph: MPSGraph,
    numGlobalFeatures: NSNumber,
    dataType: MPSDataType = .float32
  ) {
    shape = InputShape.create(
      batchSize: -1,
      numChannels: numGlobalFeatures,
      nnYLen: 1,
      nnXLen: 1)

    self.tensor = graph.placeholder(
      shape: shape,
      dataType: dataType,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}
/// The meta-feature input: a dynamic-batch placeholder shaped (N, C, 1, 1),
/// mirroring InputGlobalLayer.
struct InputMetaLayer {
  let tensor: MPSGraphTensor
  let shape: [NSNumber]

  init(
    graph: MPSGraph,
    numMetaFeatures: NSNumber,
    dataType: MPSDataType = .float32
  ) {
    shape = InputShape.create(
      batchSize: -1,
      numChannels: numMetaFeatures,
      nnYLen: 1,
      nnXLen: 1)

    self.tensor = graph.placeholder(
      shape: shape,
      dataType: dataType,
      name: nil)

    // Consistency fix: every sibling input layer asserts rank 4; this one was
    // missing the (debug-only, trivially satisfied) check.
    assert(self.tensor.shape?.count == 4)
  }
}

/// The board mask input: (N, 1, nnYLen, nnXLen); 1.0 on playable points,
/// 0.0 on padding outside the actual board.
struct MaskLayer {
  let tensor: MPSGraphTensor
  let shape: [NSNumber]

  init(
    graph: MPSGraph,
    nnXLen: NSNumber,
    nnYLen: NSNumber,
    dataType: MPSDataType = .float32
  ) {
    shape = InputShape.create(
      batchSize: -1,
      numChannels: 1,
      nnYLen: nnYLen,
      nnXLen: nnXLen)

    self.tensor = graph.placeholder(
      shape: shape,
      dataType: dataType,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}
struct MaskSumLayer {
  // Sum of the mask over H and W: (N, 1, 1, 1), i.e. the number of playable
  // points per batch element.
  let tensor: MPSGraphTensor

  /// Wrap an existing precomputed tensor.
  init(tensor: MPSGraphTensor) {
    self.tensor = tensor
    assert(self.tensor.shape?.count == 4)
  }

  /// Reduce the mask over the spatial axes.
  init(
    graph: MPSGraph,
    maskTensor: MPSGraphTensor
  ) {
    let hwAxes = InputShape.getHWAxes()

    self.tensor = graph.reductionSum(
      with: maskTensor,
      axes: hwAxes,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}

/// A structure that represents sqrt(maskSum) * 0.1 - 1.4
/// (a board-size feature; zero for a 14x14-equivalent area).
struct MaskSumSqrtS14M01Layer {
  let tensor: MPSGraphTensor

  /// Wrap an existing precomputed tensor.
  init(tensor: MPSGraphTensor) {
    self.tensor = tensor
    assert(self.tensor.shape?.count == 4)
  }

  /// Computed as (sqrt(maskSum) - 14) * 0.1, algebraically equal to
  /// sqrt(maskSum) * 0.1 - 1.4.
  init(
    graph: MPSGraph,
    maskSum: MaskSumLayer
  ) {
    let sqrtMaskSum = graph.squareRoot(with: maskSum.tensor, name: nil)

    let fourTeen = graph.constant(
      14.0,
      shape: [1],
      dataType: maskSum.tensor.dataType)

    let subtracted = graph.subtraction(sqrtMaskSum, fourTeen, name: nil)

    let zeroPointone = graph.constant(
      0.1,
      shape: [1],
      dataType: maskSum.tensor.dataType)

    self.tensor = graph.multiplication(
      subtracted,
      zeroPointone,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}

/// A structure for (sqrt(maskSum) * 0.1 - 1.4)^2 - 0.1
struct MaskSumSqrtS14M01SquareS01Layer {
  let tensor: MPSGraphTensor

  /// Wrap an existing precomputed tensor.
  init(tensor: MPSGraphTensor) {
    self.tensor = tensor
    assert(self.tensor.shape?.count == 4)
  }

  /// Squares the MaskSumSqrtS14M01 feature and shifts by -0.1.
  init(
    graph: MPSGraph,
    maskSumSqrtS14M01: MaskSumSqrtS14M01Layer
  ) {
    let squared = graph.square(with: maskSumSqrtS14M01.tensor, name: nil)

    let zeroPointone = graph.constant(
      0.1,
      shape: [1],
      dataType: maskSumSqrtS14M01.tensor.dataType)

    self.tensor = graph.subtraction(
      squared,
      zeroPointone,
      name: nil)

    assert(self.tensor.shape?.count == 4)
  }
}
/// The activation functions a layer may apply.
public enum ActivationKind {
  case identity  // pass input through unchanged
  case relu
  case mish
}

/// Description of a 2D convolution: kernel geometry, channel counts, dilation,
/// and a non-owning pointer to the weight buffer (laid out for OIHW use).
/// The compiler-synthesized memberwise initializer has exactly the same
/// signature as the hand-written one it replaces, so call sites are unchanged.
public struct SWConvLayerDesc {
  let convYSize: NSNumber
  let convXSize: NSNumber
  let inChannels: NSNumber
  let outChannels: NSNumber
  let dilationY: Int
  let dilationX: Int
  let weights: UnsafeMutablePointer<Float32>
}

/// C++-facing factory: bridges Int32 arguments into NSNumber/Int fields.
/// The weights pointer is borrowed, not copied.
public func createSWConvLayerDesc(
  convYSize: Int32,
  convXSize: Int32,
  inChannels: Int32,
  outChannels: Int32,
  dilationY: Int32,
  dilationX: Int32,
  weights: UnsafeMutablePointer<Float32>
) -> SWConvLayerDesc {
  SWConvLayerDesc(
    convYSize: NSNumber(value: convYSize),
    convXSize: NSNumber(value: convXSize),
    inChannels: NSNumber(value: inChannels),
    outChannels: NSNumber(value: outChannels),
    dilationY: Int(dilationY),
    dilationX: Int(dilationX),
    weights: weights)
}
/// Description of a batch-norm layer whose scale/variance and bias/mean have
/// already been merged offline into a single per-channel scale and bias.
/// Pointers are borrowed, not owned. Uses the synthesized memberwise
/// initializer, which matches the removed hand-written one exactly.
public struct SWBatchNormLayerDesc {
  let numChannels: NSNumber
  let mergedScale: UnsafeMutablePointer<Float32>
  let mergedBias: UnsafeMutablePointer<Float32>
}

/// C++-facing factory bridging Int32 into the NSNumber channel count.
public func createSWBatchNormLayerDesc(
  numChannels: Int32,
  mergedScale: UnsafeMutablePointer<Float32>,
  mergedBias: UnsafeMutablePointer<Float32>
) -> SWBatchNormLayerDesc {
  SWBatchNormLayerDesc(
    numChannels: NSNumber(value: numChannels),
    mergedScale: mergedScale,
    mergedBias: mergedBias)
}

/// Description of a dense (matrix multiplication) layer with a borrowed
/// (inChannels x outChannels) weight buffer.
public struct SWMatMulLayerDesc {
  let inChannels: NSNumber
  let outChannels: NSNumber
  let weights: UnsafeMutablePointer<Float32>
}

/// C++-facing factory bridging Int32 into NSNumber channel counts.
public func createSWMatMulLayerDesc(
  inChannels: Int32,
  outChannels: Int32,
  weights: UnsafeMutablePointer<Float32>
) -> SWMatMulLayerDesc {
  SWMatMulLayerDesc(
    inChannels: NSNumber(value: inChannels),
    outChannels: NSNumber(value: outChannels),
    weights: weights)
}
/// A struct that represents the bias layer description: a per-channel bias
/// vector with a borrowed weight pointer.
public struct SWMatBiasLayerDesc {
  let numChannels: NSNumber
  let weights: UnsafeMutablePointer<Float32>

  init(
    numChannels: NSNumber,
    weights: UnsafeMutablePointer<Float32>
  ) {
    self.numChannels = numChannels
    self.weights = weights
  }
}

/// C++-facing factory bridging Int32 into the NSNumber channel count.
public func createSWMatBiasLayerDesc(
  numChannels: Int32,
  weights: UnsafeMutablePointer<Float32>
) -> SWMatBiasLayerDesc {
  return SWMatBiasLayerDesc(
    numChannels: numChannels as NSNumber,
    weights: weights)
}

// MARK: - Core Layers

/// A class that represents a convolutional layer using MPSGraph:
/// stride-1, TF_SAME padding, NCHW data with OIHW weights baked in as a
/// graph constant (weights are read, not retained, at graph-build time).
class ConvLayer {
  let resultTensor: MPSGraphTensor
  // Fixed descriptor: the dilation fields of SWConvLayerDesc are NOT applied
  // here; dilation is hard-coded to 1.
  // NOTE(review): presumably all current models use dilation 1 — confirm.
  let convDescriptor = MPSGraphConvolution2DOpDescriptor(
    strideInX: 1,
    strideInY: 1,
    dilationRateInX: 1,
    dilationRateInY: 1,
    groups: 1,
    paddingStyle: .TF_SAME,
    dataLayout: .NCHW,
    weightsLayout: .OIHW)!

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    descriptor: SWConvLayerDesc,
    nnXLen: NSNumber,
    nnYLen: NSNumber
  ) {
    let weightsShape = [
      descriptor.outChannels,
      descriptor.inChannels,
      descriptor.convYSize,
      descriptor.convXSize,
    ]

    // No-copy view of the host weight buffer, turned into a graph constant.
    let weightsData = Data(
      floatsNoCopy: descriptor.weights,
      shape: weightsShape)

    let weightsTensor = graph.constant(
      weightsData,
      shape: weightsShape,
      dataType: sourceTensor.dataType)

    resultTensor = graph.convolution2D(
      sourceTensor,
      weights: weightsTensor,
      descriptor: convDescriptor,
      name: nil)

    assert(resultTensor.shape?.count == 4)
  }
}
/// A class that represents a batch normalization layer (inference form):
/// y = (x * mergedScale + mergedBias) * mask, where scale/bias already fold
/// in the running mean/variance. The trailing mask multiply re-zeroes
/// off-board points that the bias would otherwise make non-zero.
class BatchNormLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    maskTensor: MPSGraphTensor,
    descriptor: SWBatchNormLayerDesc,
    nnXLen: NSNumber,
    nnYLen: NSNumber
  ) {
    // (1, C, 1, 1) so the per-channel parameters broadcast over N, H, W.
    let scaleBiasShape = InputShape.create(
      batchSize: 1,
      numChannels: descriptor.numChannels,
      nnYLen: 1,
      nnXLen: 1)

    let mergedScaleData = Data(
      floatsNoCopy: descriptor.mergedScale,
      shape: scaleBiasShape)

    let mergedBiasData = Data(
      floatsNoCopy: descriptor.mergedBias,
      shape: scaleBiasShape)

    let scaleTensor = graph.constant(
      mergedScaleData,
      shape: scaleBiasShape,
      dataType: sourceTensor.dataType)

    let biasTensor = graph.constant(
      mergedBiasData,
      shape: scaleBiasShape,
      dataType: sourceTensor.dataType)

    let scaled = graph.multiplication(
      sourceTensor,
      scaleTensor,
      name: nil)

    let normalized = graph.addition(
      scaled,
      biasTensor,
      name: nil)

    resultTensor = graph.multiplication(
      normalized,
      maskTensor,
      name: nil)

    assert(resultTensor.shape?.count == 4)
  }
}

/// A structure that represents an activation layer: applies the requested
/// ActivationKind, passing the tensor through unchanged for .identity.
struct ActivationLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    activationKind: ActivationKind
  ) {
    switch activationKind {
    case .relu:
      resultTensor = graph.reLU(with: sourceTensor, name: nil)
    case .mish:
      resultTensor = graph.mish(tensor: sourceTensor)
    default:
      resultTensor = sourceTensor
    }

    assert(resultTensor.shape == sourceTensor.shape)
  }
}
/// A structure representing a matrix multiplication layer: flattens the
/// source to (N*, inChannels) and multiplies by a constant
/// (inChannels x outChannels) weight matrix, yielding a rank-2 result.
struct MatMulLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    descriptor: SWMatMulLayerDesc,
    sourceTensor: MPSGraphTensor
  ) {
    // Taken together these two asserts require shape[1] == inChannels for
    // both rank-2 and rank-4 sources (each disjunct exempts one rank that
    // the other assert then covers).
    // NOTE(review): mirrors the upstream Metal backend; the intent appears
    // to be "channel axis must match inChannels" — confirm against it.
    assert(
      (sourceTensor.shape?.count == 4) || (sourceTensor.shape?[1] == descriptor.inChannels))
    assert(
      (sourceTensor.shape?.count == 2) || (sourceTensor.shape?[1] == descriptor.inChannels))

    let weightsShape = [
      descriptor.inChannels,
      descriptor.outChannels,
    ]

    // No-copy view of host weights, baked in as a graph constant.
    let weightsData = Data(
      floatsNoCopy: descriptor.weights,
      shape: weightsShape)

    let weightsTensor = graph.constant(
      weightsData,
      shape: weightsShape,
      dataType: sourceTensor.dataType)

    // Collapse all leading dimensions into the batch axis.
    let shape = [-1, descriptor.inChannels]

    let reshapedSource = graph.reshape(
      sourceTensor,
      shape: shape,
      name: nil)

    resultTensor = graph.matrixMultiplication(
      primary: reshapedSource,
      secondary: weightsTensor,
      name: nil)

    assert(resultTensor.shape?.count == 2)
  }
}

/// A structure that performs matrix bias operations: adds a constant
/// (1 x numChannels) bias row to a rank-2 source, broadcasting over rows.
struct MatBiasLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    descriptor: SWMatBiasLayerDesc,
    sourceTensor: MPSGraphTensor
  ) {
    assert(
      (sourceTensor.shape?.count == 2) && (sourceTensor.shape?[1] == descriptor.numChannels))

    let weightsShape = [1, descriptor.numChannels]

    let weightsData = Data(
      floatsNoCopy: descriptor.weights,
      shape: weightsShape)

    let weightsTensor = graph.constant(
      weightsData,
      shape: weightsShape,
      dataType: sourceTensor.dataType)

    resultTensor = graph.addition(
      sourceTensor,
      weightsTensor,
      name: nil)
  }
}
/// A structure that performs bias operations in NC coordinates: reshapes a
/// per-channel bias tensor to (N, C, 1, 1) and adds it to a spatial source,
/// broadcasting over H and W.
struct AddNCBiasLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    biasTensor: MPSGraphTensor,
    nnXLen: NSNumber,
    nnYLen: NSNumber,
    numChannels: NSNumber
  ) {
    let shape = InputShape.create(
      batchSize: -1,
      numChannels: numChannels,
      nnYLen: 1,
      nnXLen: 1)

    assert(biasTensor.shape?[1] == shape[1])

    let reshaped = graph.reshape(biasTensor, shape: shape, name: nil)
    resultTensor = graph.addition(sourceTensor, reshaped, name: nil)

    assert(resultTensor.shape?.count == 4)
    assert(resultTensor.shape?[2] == nnYLen)
    assert(resultTensor.shape?[3] == nnXLen)
  }
}

// MARK: - Pooling Layers

/// A structure that represents a global pooling layer. Concatenates, along
/// the channel axis: (1) the masked spatial mean, (2) the mean scaled by the
/// board-size feature, (3) the spatial max.
struct GlobalPoolingLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    maskTensor: MPSGraphTensor,
    maskSumTensor: MPSGraphTensor,
    maskSumSqrtS14M01Tensor: MPSGraphTensor
  ) {
    let hwAxes = InputShape.getHWAxes()
    let channelAxis = InputShape.getChannelAxis()

    let sumTensor = graph.reductionSum(
      with: sourceTensor,
      axes: hwAxes,
      name: nil)

    // Divide by the live-point count, not H*W, so padding doesn't dilute it.
    let meanTensor = graph.division(sumTensor, maskSumTensor, name: nil)

    let meanMaskTensor = graph.multiplication(
      meanTensor,
      maskSumSqrtS14M01Tensor,
      name: nil)

    // Subtract 1 outside the mask (mask-1 is 0 inside, -1 outside) before
    // taking the max, biasing off-board points downward.
    // NOTE(review): this only excludes off-board points from the max if real
    // values never fall more than 1 below them — mirrors the upstream Metal
    // backend; confirm the value range assumption there.
    let oneTensor = graph.constant(1.0, dataType: sourceTensor.dataType)
    let maskM1Tensor = graph.subtraction(maskTensor, oneTensor, name: nil)
    let addition = graph.addition(sourceTensor, maskM1Tensor, name: nil)

    let maxTensor = graph.reductionMaximum(
      with: addition,
      axes: hwAxes,
      name: nil)

    resultTensor = graph.concatTensors(
      [
        meanTensor,
        meanMaskTensor,
        maxTensor,
      ],
      dimension: channelAxis,
      name: nil)

    assert(resultTensor.shape?.count == 4)
    assert(resultTensor.shape?[2] == 1)
    assert(resultTensor.shape?[3] == 1)
  }
}

/// A structure that represents a layer that performs global pooling on the
/// input tensor for the value head: masked mean, mean scaled by the linear
/// board-size feature, and mean scaled by the quadratic board-size feature.
struct GlobalPoolingValueLayer {
  let resultTensor: MPSGraphTensor

  init(
    graph: MPSGraph,
    sourceTensor: MPSGraphTensor,
    maskSumTensor: MPSGraphTensor,
    maskSumSqrtS14M01Tensor: MPSGraphTensor,
    maskSumSqrtS14M01SquareS01Tensor: MPSGraphTensor
  ) {
    let hwAxes = InputShape.getHWAxes()
    let channelAxis = InputShape.getChannelAxis()

    let sumTensor = graph.reductionSum(
      with: sourceTensor,
      axes: hwAxes,
      name: nil)

    let meanTensor = graph.division(sumTensor, maskSumTensor, name: nil)

    let meanMaskTensor = graph.multiplication(
      meanTensor,
      maskSumSqrtS14M01Tensor,
      name: nil)

    let meanMaskSquareTensor = graph.multiplication(
      meanTensor,
      maskSumSqrtS14M01SquareS01Tensor,
      name: nil)

    resultTensor = graph.concatTensors(
      [
        meanTensor,
        meanMaskTensor,
        meanMaskSquareTensor,
      ],
      dimension: channelAxis,
      name: nil)

    assert(resultTensor.shape?.count == 4)
    assert(resultTensor.shape?[2] == 1)
    assert(resultTensor.shape?[3] == 1)
  }
}

// MARK: - Block Descriptors

/// Base class for block descriptors; exists so heterogeneous block lists can
/// be stored and dispatched on concrete subclass type.
public class BlockDescriptor {
}
/// Descriptor for a plain pre-activation residual block:
/// BN -> activation -> conv -> BN -> activation -> conv, with the result
/// added back to the block input (see ResidualBlock, which consumes this).
public class SWResidualBlockDesc: BlockDescriptor {
  // Batch norm applied to the block input before the first activation.
  let preBN: SWBatchNormLayerDesc
  // Activation between preBN and the first convolution.
  let preActivation: ActivationKind
  // First (regular) convolution of the block.
  let regularConv: SWConvLayerDesc
  // Batch norm between the two convolutions.
  let midBN: SWBatchNormLayerDesc
  // Activation between midBN and the final convolution.
  let midActivation: ActivationKind
  // Final convolution, whose output is added to the block input.
  let finalConv: SWConvLayerDesc

  init(
    preBN: SWBatchNormLayerDesc,
    preActivation: ActivationKind,
    regularConv: SWConvLayerDesc,
    midBN: SWBatchNormLayerDesc,
    midActivation: ActivationKind,
    finalConv: SWConvLayerDesc
  ) {
    self.preBN = preBN
    self.preActivation = preActivation
    self.regularConv = regularConv
    self.midBN = midBN
    self.midActivation = midActivation
    self.finalConv = finalConv
  }
}

/// Free-function factory for SWResidualBlockDesc — presumably the entry point
/// used by the C++/Swift bridge; confirm against the C++ caller.
public func createSWResidualBlockDesc(
  preBN: SWBatchNormLayerDesc,
  preActivation: ActivationKind,
  regularConv: SWConvLayerDesc,
  midBN: SWBatchNormLayerDesc,
  midActivation: ActivationKind,
  finalConv: SWConvLayerDesc
) -> SWResidualBlockDesc {
  return SWResidualBlockDesc(
    preBN: preBN,
    preActivation: preActivation,
    regularConv: regularConv,
    midBN: midBN,
    midActivation: midActivation,
    finalConv: finalConv)
}

/// A class that represents a residual block with global pooling.
+public class SWGlobalPoolingResidualBlockDesc: BlockDescriptor { + let preBN: SWBatchNormLayerDesc + let preActivation: ActivationKind + let regularConv: SWConvLayerDesc + let gpoolConv: SWConvLayerDesc + let gpoolBN: SWBatchNormLayerDesc + let gpoolActivation: ActivationKind + let gpoolToBiasMul: SWMatMulLayerDesc + let midBN: SWBatchNormLayerDesc + let midActivation: ActivationKind + let finalConv: SWConvLayerDesc + + init( + preBN: SWBatchNormLayerDesc, + preActivation: ActivationKind, + regularConv: SWConvLayerDesc, + gpoolConv: SWConvLayerDesc, + gpoolBN: SWBatchNormLayerDesc, + gpoolActivation: ActivationKind, + gpoolToBiasMul: SWMatMulLayerDesc, + midBN: SWBatchNormLayerDesc, + midActivation: ActivationKind, + finalConv: SWConvLayerDesc + ) { + self.preBN = preBN + self.preActivation = preActivation + self.regularConv = regularConv + self.gpoolConv = gpoolConv + self.gpoolBN = gpoolBN + self.gpoolActivation = gpoolActivation + self.gpoolToBiasMul = gpoolToBiasMul + self.midBN = midBN + self.midActivation = midActivation + self.finalConv = finalConv + } +} + +public func createSWGlobalPoolingResidualBlockDesc( + preBN: SWBatchNormLayerDesc, + preActivation: ActivationKind, + regularConv: SWConvLayerDesc, + gpoolConv: SWConvLayerDesc, + gpoolBN: SWBatchNormLayerDesc, + gpoolActivation: ActivationKind, + gpoolToBiasMul: SWMatMulLayerDesc, + midBN: SWBatchNormLayerDesc, + midActivation: ActivationKind, + finalConv: SWConvLayerDesc +) -> SWGlobalPoolingResidualBlockDesc { + return SWGlobalPoolingResidualBlockDesc( + preBN: preBN, + preActivation: preActivation, + regularConv: regularConv, + gpoolConv: gpoolConv, + gpoolBN: gpoolBN, + gpoolActivation: gpoolActivation, + gpoolToBiasMul: gpoolToBiasMul, + midBN: midBN, + midActivation: midActivation, + finalConv: finalConv) +} + +/// A class that represents a nested bottleneck residual block +public class SWNestedBottleneckResidualBlockDesc: BlockDescriptor { + let preBN: SWBatchNormLayerDesc + let preActivation: 
ActivationKind + let preConv: SWConvLayerDesc + let blockDescriptors: [BlockDescriptor] + let postBN: SWBatchNormLayerDesc + let postActivation: ActivationKind + let postConv: SWConvLayerDesc + + init( + preBN: SWBatchNormLayerDesc, + preActivation: ActivationKind, + preConv: SWConvLayerDesc, + blockDescriptors: [BlockDescriptor], + postBN: SWBatchNormLayerDesc, + postActivation: ActivationKind, + postConv: SWConvLayerDesc + ) { + self.preBN = preBN + self.preActivation = preActivation + self.preConv = preConv + self.blockDescriptors = blockDescriptors + self.postBN = postBN + self.postActivation = postActivation + self.postConv = postConv + } +} + +public func createSWNestedBottleneckResidualBlockDesc( + preBN: SWBatchNormLayerDesc, + preActivation: ActivationKind, + preConv: SWConvLayerDesc, + blockDescriptors: [BlockDescriptor], + postBN: SWBatchNormLayerDesc, + postActivation: ActivationKind, + postConv: SWConvLayerDesc +) -> SWNestedBottleneckResidualBlockDesc { + return SWNestedBottleneckResidualBlockDesc( + preBN: preBN, + preActivation: preActivation, + preConv: preConv, + blockDescriptors: blockDescriptors, + postBN: postBN, + postActivation: postActivation, + postConv: postConv) +} + +public class BlockDescriptorBuilder { + public var blockDescriptors: [BlockDescriptor] = [] + + init() {} + + public func enque(with descriptor: BlockDescriptor) { + blockDescriptors.append(descriptor) + } +} + +public func createBlockDescriptorBuilder() -> BlockDescriptorBuilder { + return BlockDescriptorBuilder() +} + +// MARK: - Block Implementations + +/// A class that represents a Residual Block layer +class ResidualBlock { + let resultTensor: MPSGraphTensor + + init( + graph: MPSGraph, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + descriptor: SWResidualBlockDesc, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let preBN = BatchNormLayer( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + descriptor: descriptor.preBN, + nnXLen: 
nnXLen, + nnYLen: nnYLen) + + let preActivation = ActivationLayer( + graph: graph, + sourceTensor: preBN.resultTensor, + activationKind: descriptor.preActivation) + + let regularConv = ConvLayer( + graph: graph, + sourceTensor: preActivation.resultTensor, + descriptor: descriptor.regularConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let midBN = BatchNormLayer( + graph: graph, + sourceTensor: regularConv.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.midBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let midActivation = ActivationLayer( + graph: graph, + sourceTensor: midBN.resultTensor, + activationKind: descriptor.midActivation) + + let finalConv = ConvLayer( + graph: graph, + sourceTensor: midActivation.resultTensor, + descriptor: descriptor.finalConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + resultTensor = graph.addition( + sourceTensor, + finalConv.resultTensor, + name: nil) + + assert(resultTensor.shape?.count == 4) + } +} + +/// A class representing a residual block with global pooling +class GlobalPoolingResidualBlock { + let resultTensor: MPSGraphTensor + + init( + graph: MPSGraph, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + descriptor: SWGlobalPoolingResidualBlockDesc, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let maskSum = MaskSumLayer(tensor: maskSumTensor) + let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer(tensor: maskSumSqrtS14M01Tensor) + + let preBN = BatchNormLayer( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + descriptor: descriptor.preBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let preActivation = ActivationLayer( + graph: graph, + sourceTensor: preBN.resultTensor, + activationKind: descriptor.preActivation) + + let regularConv = ConvLayer( + graph: graph, + sourceTensor: preActivation.resultTensor, + descriptor: descriptor.regularConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let gpoolConv = ConvLayer( + graph: 
graph, + sourceTensor: preActivation.resultTensor, + descriptor: descriptor.gpoolConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let gpoolBN = BatchNormLayer( + graph: graph, + sourceTensor: gpoolConv.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.gpoolBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let gpoolActivation = ActivationLayer( + graph: graph, + sourceTensor: gpoolBN.resultTensor, + activationKind: descriptor.gpoolActivation) + + let gpoolConcat = GlobalPoolingLayer( + graph: graph, + sourceTensor: gpoolActivation.resultTensor, + maskTensor: maskTensor, + maskSumTensor: maskSum.tensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor) + + assert(gpoolConcat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) + + let gpoolToBiasMul = MatMulLayer( + graph: graph, + descriptor: descriptor.gpoolToBiasMul, + sourceTensor: gpoolConcat.resultTensor) + + let added = AddNCBiasLayer( + graph: graph, + sourceTensor: regularConv.resultTensor, + biasTensor: gpoolToBiasMul.resultTensor, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: descriptor.gpoolToBiasMul.outChannels) + + let midBN = BatchNormLayer( + graph: graph, + sourceTensor: added.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.midBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let midActivation = ActivationLayer( + graph: graph, + sourceTensor: midBN.resultTensor, + activationKind: descriptor.midActivation) + + let finalConv = ConvLayer( + graph: graph, + sourceTensor: midActivation.resultTensor, + descriptor: descriptor.finalConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + resultTensor = graph.addition( + sourceTensor, + finalConv.resultTensor, + name: nil) + + assert(resultTensor.shape?.count == 4) + } +} + +/// A structure that represents a block stack +struct BlockStack { + let resultTensor: MPSGraphTensor + + static func processBlockDescriptors( + _ graph: MPSGraph, + _ sourceTensor: MPSGraphTensor, + _ maskTensor: MPSGraphTensor, + _ maskSumTensor: 
MPSGraphTensor, + _ maskSumSqrtS14M01Tensor: MPSGraphTensor, + _ blockDescriptors: [BlockDescriptor], + _ index: Int, + _ nnXLen: NSNumber, + _ nnYLen: NSNumber + ) -> MPSGraphTensor { + guard index < blockDescriptors.count else { + return sourceTensor + } + + let blockDescriptor = blockDescriptors[index] + let blockInput: MPSGraphTensor + + switch blockDescriptor { + case let globalPoolingDescriptor as SWGlobalPoolingResidualBlockDesc: + let globalPooling = GlobalPoolingResidualBlock( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + maskSumTensor: maskSumTensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + descriptor: globalPoolingDescriptor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + blockInput = globalPooling.resultTensor + case let nestedBottleneckDescriptor as SWNestedBottleneckResidualBlockDesc: + let nestedBottleneck = NestedBottleneckResidualBlock( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + maskSumTensor: maskSumTensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + descriptor: nestedBottleneckDescriptor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + blockInput = nestedBottleneck.resultTensor + case let residualBlockDescriptor as SWResidualBlockDesc: + let ordinary = ResidualBlock( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + descriptor: residualBlockDescriptor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + blockInput = ordinary.resultTensor + default: + blockInput = sourceTensor + } + + return processBlockDescriptors( + graph, + blockInput, + maskTensor, + maskSumTensor, + maskSumSqrtS14M01Tensor, + blockDescriptors, + index + 1, + nnXLen, + nnYLen) + } + + init( + graph: MPSGraph, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + blockDescriptors: [BlockDescriptor], + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + resultTensor = BlockStack.processBlockDescriptors( + graph, + 
sourceTensor, + maskTensor, + maskSumTensor, + maskSumSqrtS14M01Tensor, + blockDescriptors, + 0, + nnXLen, + nnYLen) + } +} + +/// A structure that represents a nested bottleneck residual block +struct NestedBottleneckResidualBlock { + let resultTensor: MPSGraphTensor + + init( + graph: MPSGraph, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + descriptor: SWNestedBottleneckResidualBlockDesc, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let preBN = BatchNormLayer( + graph: graph, + sourceTensor: sourceTensor, + maskTensor: maskTensor, + descriptor: descriptor.preBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let preActivation = ActivationLayer( + graph: graph, + sourceTensor: preBN.resultTensor, + activationKind: descriptor.preActivation) + + let preConv = ConvLayer( + graph: graph, + sourceTensor: preActivation.resultTensor, + descriptor: descriptor.preConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let blocks = BlockStack( + graph: graph, + sourceTensor: preConv.resultTensor, + maskTensor: maskTensor, + maskSumTensor: maskSumTensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + blockDescriptors: descriptor.blockDescriptors, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let postBN = BatchNormLayer( + graph: graph, + sourceTensor: blocks.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.postBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let postActivation = ActivationLayer( + graph: graph, + sourceTensor: postBN.resultTensor, + activationKind: descriptor.postActivation) + + let postConv = ConvLayer( + graph: graph, + sourceTensor: postActivation.resultTensor, + descriptor: descriptor.postConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + resultTensor = graph.addition( + sourceTensor, + postConv.resultTensor, + name: nil) + + assert(resultTensor.shape?.count == 4) + } +} + +// MARK: - SGF Metadata Encoder + +/// Class representing the description of the SGF Metadata 
Encoder. +public class SWSGFMetadataEncoderDesc { + let version: Int + let numInputMetaChannels: Int + let mul1: SWMatMulLayerDesc + let bias1: SWMatBiasLayerDesc + let act1: ActivationKind + let mul2: SWMatMulLayerDesc + let bias2: SWMatBiasLayerDesc + let act2: ActivationKind + let mul3: SWMatMulLayerDesc + + init( + version: Int, + numInputMetaChannels: Int, + mul1: SWMatMulLayerDesc, + bias1: SWMatBiasLayerDesc, + act1: ActivationKind, + mul2: SWMatMulLayerDesc, + bias2: SWMatBiasLayerDesc, + act2: ActivationKind, + mul3: SWMatMulLayerDesc + ) { + self.version = version + self.numInputMetaChannels = numInputMetaChannels + self.mul1 = mul1 + self.bias1 = bias1 + self.act1 = act1 + self.mul2 = mul2 + self.bias2 = bias2 + self.act2 = act2 + self.mul3 = mul3 + } +} + +public func createSWSGFMetadataEncoderDesc( + version: Int32, + numInputMetaChannels: Int32, + mul1: SWMatMulLayerDesc, + bias1: SWMatBiasLayerDesc, + act1: ActivationKind, + mul2: SWMatMulLayerDesc, + bias2: SWMatBiasLayerDesc, + act2: ActivationKind, + mul3: SWMatMulLayerDesc +) -> SWSGFMetadataEncoderDesc? { + return SWSGFMetadataEncoderDesc( + version: Int(version), + numInputMetaChannels: Int(numInputMetaChannels), + mul1: mul1, + bias1: bias1, + act1: act1, + mul2: mul2, + bias2: bias2, + act2: act2, + mul3: mul3) +} + +/// A class that encodes SGF metadata. 
+class SGFMetadataEncoder { + let resultTensor: MPSGraphTensor + + init( + graph: MPSGraph, + descriptor: SWSGFMetadataEncoderDesc, + sourceTensor: MPSGraphTensor + ) { + let mul1 = MatMulLayer( + graph: graph, + descriptor: descriptor.mul1, + sourceTensor: sourceTensor) + + let bias1 = MatBiasLayer( + graph: graph, + descriptor: descriptor.bias1, + sourceTensor: mul1.resultTensor) + + let act1 = ActivationLayer( + graph: graph, + sourceTensor: bias1.resultTensor, + activationKind: descriptor.act1) + + let mul2 = MatMulLayer( + graph: graph, + descriptor: descriptor.mul2, + sourceTensor: act1.resultTensor) + + let bias2 = MatBiasLayer( + graph: graph, + descriptor: descriptor.bias2, + sourceTensor: mul2.resultTensor) + + let act2 = ActivationLayer( + graph: graph, + sourceTensor: bias2.resultTensor, + activationKind: descriptor.act2) + + let mul3 = MatMulLayer( + graph: graph, + descriptor: descriptor.mul3, + sourceTensor: act2.resultTensor) + + resultTensor = mul3.resultTensor + + assert(resultTensor.shape?.count == 2) + } +} + +// MARK: - Trunk + +/// A class that describes a trunk for a neural network +public class SWTrunkDesc { + let version: Int + let trunkNumChannels: NSNumber + let midNumChannels: NSNumber + let regularNumChannels: NSNumber + let gpoolNumChannels: NSNumber + let initialConv: SWConvLayerDesc + let initialMatMul: SWMatMulLayerDesc + let sgfMetadataEncoder: SWSGFMetadataEncoderDesc? 
+ let blockDescriptors: [BlockDescriptor] + let trunkTipBN: SWBatchNormLayerDesc + let trunkTipActivation: ActivationKind + + init( + version: Int, + trunkNumChannels: NSNumber, + midNumChannels: NSNumber, + regularNumChannels: NSNumber, + gpoolNumChannels: NSNumber, + initialConv: SWConvLayerDesc, + initialMatMul: SWMatMulLayerDesc, + sgfMetadataEncoder: SWSGFMetadataEncoderDesc?, + blockDescriptors: [BlockDescriptor], + trunkTipBN: SWBatchNormLayerDesc, + trunkTipActivation: ActivationKind + ) { + self.version = version + self.trunkNumChannels = trunkNumChannels + self.midNumChannels = midNumChannels + self.regularNumChannels = regularNumChannels + self.gpoolNumChannels = gpoolNumChannels + self.initialConv = initialConv + self.initialMatMul = initialMatMul + self.sgfMetadataEncoder = sgfMetadataEncoder + self.blockDescriptors = blockDescriptors + self.trunkTipBN = trunkTipBN + self.trunkTipActivation = trunkTipActivation + } +} + +public func createSWTrunkDesc( + version: Int32, + trunkNumChannels: Int32, + midNumChannels: Int32, + regularNumChannels: Int32, + gpoolNumChannels: Int32, + initialConv: SWConvLayerDesc, + initialMatMul: SWMatMulLayerDesc, + sgfMetadataEncoder: SWSGFMetadataEncoderDesc?, + blockDescriptors: [BlockDescriptor], + trunkTipBN: SWBatchNormLayerDesc, + trunkTipActivation: ActivationKind +) -> SWTrunkDesc { + return SWTrunkDesc( + version: Int(version), + trunkNumChannels: trunkNumChannels as NSNumber, + midNumChannels: midNumChannels as NSNumber, + regularNumChannels: regularNumChannels as NSNumber, + gpoolNumChannels: gpoolNumChannels as NSNumber, + initialConv: initialConv, + initialMatMul: initialMatMul, + sgfMetadataEncoder: sgfMetadataEncoder, + blockDescriptors: blockDescriptors, + trunkTipBN: trunkTipBN, + trunkTipActivation: trunkTipActivation) +} + +/// A structure representing a ResNet trunk for a neural network +struct Trunk { + let resultTensor: MPSGraphTensor + + static func getBlockSourceTensor( + graph: MPSGraph, + 
descriptor: SWSGFMetadataEncoderDesc?, + initialAdd: AddNCBiasLayer, + inputMetaTensor: MPSGraphTensor?, + nnXLen: NSNumber, + nnYLen: NSNumber, + numChannels: NSNumber + ) -> MPSGraphTensor { + var blockSourceTensor: MPSGraphTensor + + if let inputMetaTensor, + let descriptor, descriptor.numInputMetaChannels > 0 + { + let encoded = SGFMetadataEncoder( + graph: graph, + descriptor: descriptor, + sourceTensor: inputMetaTensor) + + let encodedAdd = AddNCBiasLayer( + graph: graph, + sourceTensor: initialAdd.resultTensor, + biasTensor: encoded.resultTensor, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: numChannels) + + blockSourceTensor = encodedAdd.resultTensor + } else { + blockSourceTensor = initialAdd.resultTensor + } + + return blockSourceTensor + } + + init( + graph: MPSGraph, + descriptor: SWTrunkDesc, + inputTensor: MPSGraphTensor, + inputGlobalTensor: MPSGraphTensor, + inputMetaTensor: MPSGraphTensor?, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let initialConv = ConvLayer( + graph: graph, + sourceTensor: inputTensor, + descriptor: descriptor.initialConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let initialMatMul = MatMulLayer( + graph: graph, + descriptor: descriptor.initialMatMul, + sourceTensor: inputGlobalTensor) + + let initialAdd = AddNCBiasLayer( + graph: graph, + sourceTensor: initialConv.resultTensor, + biasTensor: initialMatMul.resultTensor, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: descriptor.initialMatMul.outChannels) + + let blockSourceTensor = Trunk.getBlockSourceTensor( + graph: graph, + descriptor: descriptor.sgfMetadataEncoder, + initialAdd: initialAdd, + inputMetaTensor: inputMetaTensor, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: descriptor.initialMatMul.outChannels) + + let blocks = BlockStack( + graph: graph, + sourceTensor: blockSourceTensor, + maskTensor: maskTensor, + maskSumTensor: maskSumTensor, + 
maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + blockDescriptors: descriptor.blockDescriptors, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let trunkTipBN = BatchNormLayer( + graph: graph, + sourceTensor: blocks.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.trunkTipBN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let trunkTipActivation = ActivationLayer( + graph: graph, + sourceTensor: trunkTipBN.resultTensor, + activationKind: descriptor.trunkTipActivation) + + resultTensor = trunkTipActivation.resultTensor + + assert(resultTensor.shape?.count == 4) + } +} + +// MARK: - Policy Head + +/// A class that describes a policy head for a neural network +public struct SWPolicyHeadDesc { + let version: Int + let p1Conv: SWConvLayerDesc + let g1Conv: SWConvLayerDesc + let g1BN: SWBatchNormLayerDesc + let g1Activation: ActivationKind + let gpoolToBiasMul: SWMatMulLayerDesc + let p1BN: SWBatchNormLayerDesc + let p1Activation: ActivationKind + let p2Conv: SWConvLayerDesc + let gpoolToPassMul: SWMatMulLayerDesc + let gpoolToPassBias: SWMatBiasLayerDesc? + let passActivation: ActivationKind? + let gpoolToPassMul2: SWMatMulLayerDesc? + + init( + version: Int, + p1Conv: SWConvLayerDesc, + g1Conv: SWConvLayerDesc, + g1BN: SWBatchNormLayerDesc, + g1Activation: ActivationKind, + gpoolToBiasMul: SWMatMulLayerDesc, + p1BN: SWBatchNormLayerDesc, + p1Activation: ActivationKind, + p2Conv: SWConvLayerDesc, + gpoolToPassMul: SWMatMulLayerDesc, + gpoolToPassBias: SWMatBiasLayerDesc?, + passActivation: ActivationKind?, + gpoolToPassMul2: SWMatMulLayerDesc? 
+ ) { + self.version = version + self.p1Conv = p1Conv + self.g1Conv = g1Conv + self.g1BN = g1BN + self.g1Activation = g1Activation + self.gpoolToBiasMul = gpoolToBiasMul + self.p1BN = p1BN + self.p1Activation = p1Activation + self.p2Conv = p2Conv + self.gpoolToPassMul = gpoolToPassMul + self.gpoolToPassBias = gpoolToPassBias + self.passActivation = passActivation + self.gpoolToPassMul2 = gpoolToPassMul2 + + assert( + (version >= 15) + || ((gpoolToPassBias == nil) && (passActivation == nil) && (gpoolToPassMul2 == nil)) + ) + assert( + (version < 15) + || ((gpoolToPassBias != nil) && (passActivation != nil) && (gpoolToPassMul2 != nil)) + ) + } +} + +public func createSWPolicyHeadDesc( + version: Int32, + p1Conv: SWConvLayerDesc, + g1Conv: SWConvLayerDesc, + g1BN: SWBatchNormLayerDesc, + g1Activation: ActivationKind, + gpoolToBiasMul: SWMatMulLayerDesc, + p1BN: SWBatchNormLayerDesc, + p1Activation: ActivationKind, + p2Conv: SWConvLayerDesc, + gpoolToPassMul: SWMatMulLayerDesc, + gpoolToPassBias: SWMatBiasLayerDesc, + passActivation: ActivationKind, + gpoolToPassMul2: SWMatMulLayerDesc +) -> SWPolicyHeadDesc { + if version >= 15 { + return SWPolicyHeadDesc( + version: Int(version), + p1Conv: p1Conv, + g1Conv: g1Conv, + g1BN: g1BN, + g1Activation: g1Activation, + gpoolToBiasMul: gpoolToBiasMul, + p1BN: p1BN, + p1Activation: p1Activation, + p2Conv: p2Conv, + gpoolToPassMul: gpoolToPassMul, + gpoolToPassBias: gpoolToPassBias, + passActivation: passActivation, + gpoolToPassMul2: gpoolToPassMul2) + } else { + return SWPolicyHeadDesc( + version: Int(version), + p1Conv: p1Conv, + g1Conv: g1Conv, + g1BN: g1BN, + g1Activation: g1Activation, + gpoolToBiasMul: gpoolToBiasMul, + p1BN: p1BN, + p1Activation: p1Activation, + p2Conv: p2Conv, + gpoolToPassMul: gpoolToPassMul, + gpoolToPassBias: nil, + passActivation: nil, + gpoolToPassMul2: nil) + } +} + +/// A structure that represents a policy head of a neural network. 
+struct PolicyHead { + let policyTensor: MPSGraphTensor + let policyPassTensor: MPSGraphTensor + + init( + graph: MPSGraph, + descriptor: SWPolicyHeadDesc, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let p1Conv = ConvLayer( + graph: graph, + sourceTensor: sourceTensor, + descriptor: descriptor.p1Conv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let g1Conv = ConvLayer( + graph: graph, + sourceTensor: sourceTensor, + descriptor: descriptor.g1Conv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let g1BN = BatchNormLayer( + graph: graph, + sourceTensor: g1Conv.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.g1BN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let g1Activation = ActivationLayer( + graph: graph, + sourceTensor: g1BN.resultTensor, + activationKind: descriptor.g1Activation) + + let g1Concat = GlobalPoolingLayer( + graph: graph, + sourceTensor: g1Activation.resultTensor, + maskTensor: maskTensor, + maskSumTensor: maskSumTensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor) + + assert(g1Concat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) + + let gpoolToBiasMul = MatMulLayer( + graph: graph, + descriptor: descriptor.gpoolToBiasMul, + sourceTensor: g1Concat.resultTensor) + + let added = AddNCBiasLayer( + graph: graph, + sourceTensor: p1Conv.resultTensor, + biasTensor: gpoolToBiasMul.resultTensor, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: descriptor.gpoolToBiasMul.outChannels) + + let p1BN = BatchNormLayer( + graph: graph, + sourceTensor: added.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.p1BN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let p1Activation = ActivationLayer( + graph: graph, + sourceTensor: p1BN.resultTensor, + activationKind: descriptor.p1Activation) + + let p2Conv = ConvLayer( + graph: graph, + sourceTensor: p1Activation.resultTensor, + descriptor: 
descriptor.p2Conv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + policyTensor = p2Conv.resultTensor + + assert(g1Concat.resultTensor.shape?[1] == descriptor.gpoolToPassMul.inChannels) + + let gpoolToPassMul = MatMulLayer( + graph: graph, + descriptor: descriptor.gpoolToPassMul, + sourceTensor: g1Concat.resultTensor) + + if let gpoolToPassBias = descriptor.gpoolToPassBias, + let passActivation = descriptor.passActivation, + let gpoolToPassMul2 = descriptor.gpoolToPassMul2 + { + assert(descriptor.version >= 15) + + let gpoolToPassBiasLayer = MatBiasLayer( + graph: graph, + descriptor: gpoolToPassBias, + sourceTensor: gpoolToPassMul.resultTensor) + + let passActivationLayer = ActivationLayer( + graph: graph, + sourceTensor: gpoolToPassBiasLayer.resultTensor, + activationKind: passActivation) + + let gpoolToPassMul2Layer = MatMulLayer( + graph: graph, + descriptor: gpoolToPassMul2, + sourceTensor: passActivationLayer.resultTensor) + + policyPassTensor = gpoolToPassMul2Layer.resultTensor + } else { + assert(descriptor.version < 15) + policyPassTensor = gpoolToPassMul.resultTensor + } + + assert(policyTensor.shape?.count == 4) + assert(policyPassTensor.shape?.count == 2) + } +} + +// MARK: - Value Head + +/// A struct that describes the value head of a neural network +public struct SWValueHeadDesc { + let version: Int + let v1Conv: SWConvLayerDesc + let v1BN: SWBatchNormLayerDesc + let v1Activation: ActivationKind + let v2Mul: SWMatMulLayerDesc + let v2Bias: SWMatBiasLayerDesc + let v2Activation: ActivationKind + let v3Mul: SWMatMulLayerDesc + let v3Bias: SWMatBiasLayerDesc + let sv3Mul: SWMatMulLayerDesc + let sv3Bias: SWMatBiasLayerDesc + let vOwnershipConv: SWConvLayerDesc + + init( + version: Int, + v1Conv: SWConvLayerDesc, + v1BN: SWBatchNormLayerDesc, + v1Activation: ActivationKind, + v2Mul: SWMatMulLayerDesc, + v2Bias: SWMatBiasLayerDesc, + v2Activation: ActivationKind, + v3Mul: SWMatMulLayerDesc, + v3Bias: SWMatBiasLayerDesc, + sv3Mul: SWMatMulLayerDesc, + sv3Bias: 
SWMatBiasLayerDesc, + vOwnershipConv: SWConvLayerDesc + ) { + self.version = version + self.v1Conv = v1Conv + self.v1BN = v1BN + self.v1Activation = v1Activation + self.v2Mul = v2Mul + self.v2Bias = v2Bias + self.v2Activation = v2Activation + self.v3Mul = v3Mul + self.v3Bias = v3Bias + self.sv3Mul = sv3Mul + self.sv3Bias = sv3Bias + self.vOwnershipConv = vOwnershipConv + } +} + +public func createSWValueHeadDesc( + version: Int32, + v1Conv: SWConvLayerDesc, + v1BN: SWBatchNormLayerDesc, + v1Activation: ActivationKind, + v2Mul: SWMatMulLayerDesc, + v2Bias: SWMatBiasLayerDesc, + v2Activation: ActivationKind, + v3Mul: SWMatMulLayerDesc, + v3Bias: SWMatBiasLayerDesc, + sv3Mul: SWMatMulLayerDesc, + sv3Bias: SWMatBiasLayerDesc, + vOwnershipConv: SWConvLayerDesc +) -> SWValueHeadDesc { + return SWValueHeadDesc( + version: Int(version), + v1Conv: v1Conv, + v1BN: v1BN, + v1Activation: v1Activation, + v2Mul: v2Mul, + v2Bias: v2Bias, + v2Activation: v2Activation, + v3Mul: v3Mul, + v3Bias: v3Bias, + sv3Mul: sv3Mul, + sv3Bias: sv3Bias, + vOwnershipConv: vOwnershipConv) +} + +/// A structure that creates a value head for the neural network +struct ValueHead { + let valueTensor: MPSGraphTensor + let scoreValueTensor: MPSGraphTensor + let ownershipTensor: MPSGraphTensor + + init( + graph: MPSGraph, + descriptor: SWValueHeadDesc, + sourceTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + maskSumTensor: MPSGraphTensor, + maskSumSqrtS14M01Tensor: MPSGraphTensor, + maskSumSqrtS14M01SquareS01Tensor: MPSGraphTensor, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + let v1Conv = ConvLayer( + graph: graph, + sourceTensor: sourceTensor, + descriptor: descriptor.v1Conv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let v1BN = BatchNormLayer( + graph: graph, + sourceTensor: v1Conv.resultTensor, + maskTensor: maskTensor, + descriptor: descriptor.v1BN, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let v1Activation = ActivationLayer( + graph: graph, + sourceTensor: v1BN.resultTensor, + activationKind: 
descriptor.v1Activation) + + let v1Mean = + GlobalPoolingValueLayer( + graph: graph, + sourceTensor: v1Activation.resultTensor, + maskSumTensor: maskSumTensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01Tensor) + + assert(v1Mean.resultTensor.shape?[1] == descriptor.v2Mul.inChannels) + + let v2Mul = MatMulLayer( + graph: graph, + descriptor: descriptor.v2Mul, + sourceTensor: v1Mean.resultTensor) + + let v2Bias = MatBiasLayer( + graph: graph, + descriptor: descriptor.v2Bias, + sourceTensor: v2Mul.resultTensor) + + let v2Activation = ActivationLayer( + graph: graph, + sourceTensor: v2Bias.resultTensor, + activationKind: descriptor.v2Activation) + + let v3Mul = MatMulLayer( + graph: graph, + descriptor: descriptor.v3Mul, + sourceTensor: v2Activation.resultTensor) + + let v3Bias = MatBiasLayer( + graph: graph, + descriptor: descriptor.v3Bias, + sourceTensor: v3Mul.resultTensor) + + let sv3Mul = MatMulLayer( + graph: graph, + descriptor: descriptor.sv3Mul, + sourceTensor: v2Activation.resultTensor) + + let sv3Bias = MatBiasLayer( + graph: graph, + descriptor: descriptor.sv3Bias, + sourceTensor: sv3Mul.resultTensor) + + let vOwnershipConv = ConvLayer( + graph: graph, + sourceTensor: v1Activation.resultTensor, + descriptor: descriptor.vOwnershipConv, + nnXLen: nnXLen, + nnYLen: nnYLen) + + valueTensor = v3Bias.resultTensor + scoreValueTensor = sv3Bias.resultTensor + ownershipTensor = vOwnershipConv.resultTensor + + assert(valueTensor.shape?.count == 2) + assert(scoreValueTensor.shape?.count == 2) + assert(ownershipTensor.shape?.count == 4) + } +} + +// MARK: - Model Descriptor + +/// A struct that describes a neural network model used for playing the game of Go. 
/// A struct that describes a neural network model used for playing the game of Go.
/// Aggregates the channel counts and the trunk/policy-head/value-head
/// descriptors needed to build the full MPSGraph model.
public struct SWModelDesc {
  // Model/weights format version; gates optional features elsewhere.
  let version: Int
  // Human-readable model name.
  let name: String
  // Number of spatial (per-board-point) input feature channels.
  let numInputChannels: NSNumber
  // Number of global (per-position scalar) input feature channels.
  let numInputGlobalChannels: NSNumber
  // Number of SGF-metadata input channels (0 when the encoder is absent).
  let numInputMetaChannels: NSNumber
  // Output channel counts of the value head's three outputs.
  let numValueChannels: NSNumber
  let numScoreValueChannels: NSNumber
  let numOwnershipChannels: NSNumber
  // Sub-descriptors for the three major network components.
  let trunk: SWTrunkDesc
  let policyHead: SWPolicyHeadDesc
  let valueHead: SWValueHeadDesc

  init(
    version: Int,
    name: String,
    numInputChannels: NSNumber,
    numInputGlobalChannels: NSNumber,
    numInputMetaChannels: NSNumber,
    numValueChannels: NSNumber,
    numScoreValueChannels: NSNumber,
    numOwnershipChannels: NSNumber,
    trunk: SWTrunkDesc,
    policyHead: SWPolicyHeadDesc,
    valueHead: SWValueHeadDesc
  ) {
    self.version = version
    self.name = name
    self.numInputChannels = numInputChannels
    self.numInputGlobalChannels = numInputGlobalChannels
    self.numInputMetaChannels = numInputMetaChannels
    self.numValueChannels = numValueChannels
    self.numScoreValueChannels = numScoreValueChannels
    self.numOwnershipChannels = numOwnershipChannels
    self.trunk = trunk
    self.policyHead = policyHead
    self.valueHead = valueHead
  }
}

/// Free-function factory for SWModelDesc, converting the C-friendly Int32
/// channel counts into Int/NSNumber — presumably called from the C++ bridge;
/// confirm against the C++ caller.
public func createSWModelDesc(
  version: Int32,
  name: String,
  numInputChannels: Int32,
  numInputGlobalChannels: Int32,
  numInputMetaChannels: Int32,
  numValueChannels: Int32,
  numScoreValueChannels: Int32,
  numOwnershipChannels: Int32,
  trunk: SWTrunkDesc,
  policyHead: SWPolicyHeadDesc,
  valueHead: SWValueHeadDesc
) -> SWModelDesc {
  return SWModelDesc(
    version: Int(version),
    name: name,
    numInputChannels: numInputChannels as NSNumber,
    numInputGlobalChannels: numInputGlobalChannels as NSNumber,
    numInputMetaChannels: numInputMetaChannels as NSNumber,
    numValueChannels: numValueChannels as NSNumber,
    numScoreValueChannels: numScoreValueChannels as NSNumber,
    numOwnershipChannels: numOwnershipChannels as NSNumber,
    trunk: trunk,
    policyHead: policyHead,
    valueHead: valueHead)
}

// MARK: - MPSGraph Model (for GPU
inference) + +/// A structure representing a neural network model for processing Go game states using MPSGraph. +struct MPSGraphModel { + let device: MTLDevice + let commandQueue: MTLCommandQueue + let graph: MPSGraph + let nnXLen: NSNumber + let nnYLen: NSNumber + let version: Int + let numValueChannels: NSNumber + let numScoreValueChannels: NSNumber + let numOwnershipChannels: NSNumber + let input: InputLayer + let inputGlobal: InputGlobalLayer + let inputMeta: InputMetaLayer + let mask: MaskLayer + let trunk: Trunk + let policyHead: PolicyHead + let valueHead: ValueHead + let targetTensors: [MPSGraphTensor] + + init( + device: MTLDevice, + graph: MPSGraph, + descriptor: SWModelDesc, + nnXLen: NSNumber, + nnYLen: NSNumber + ) { + self.device = device + self.commandQueue = device.makeCommandQueue()! + self.graph = graph + self.nnXLen = nnXLen + self.nnYLen = nnYLen + self.version = descriptor.version + self.numValueChannels = descriptor.numValueChannels + self.numScoreValueChannels = descriptor.numScoreValueChannels + self.numOwnershipChannels = descriptor.numOwnershipChannels + + input = InputLayer( + graph: graph, + nnXLen: nnXLen, + nnYLen: nnYLen, + numChannels: descriptor.numInputChannels) + + inputGlobal = InputGlobalLayer( + graph: graph, + numGlobalFeatures: descriptor.numInputGlobalChannels) + + inputMeta = InputMetaLayer( + graph: graph, + numMetaFeatures: descriptor.numInputMetaChannels) + + mask = MaskLayer( + graph: graph, + nnXLen: nnXLen, + nnYLen: nnYLen) + + let maskSum = MaskSumLayer( + graph: graph, + maskTensor: mask.tensor) + + let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( + graph: graph, + maskSum: maskSum) + + let maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( + graph: graph, + maskSumSqrtS14M01: maskSumSqrtS14M01) + + trunk = Trunk( + graph: graph, + descriptor: descriptor.trunk, + inputTensor: input.tensor, + inputGlobalTensor: inputGlobal.tensor, + inputMetaTensor: inputMeta.tensor, + maskTensor: mask.tensor, + 
maskSumTensor: maskSum.tensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + policyHead = PolicyHead( + graph: graph, + descriptor: descriptor.policyHead, + sourceTensor: trunk.resultTensor, + maskTensor: mask.tensor, + maskSumTensor: maskSum.tensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + valueHead = ValueHead( + graph: graph, + descriptor: descriptor.valueHead, + sourceTensor: trunk.resultTensor, + maskTensor: mask.tensor, + maskSumTensor: maskSum.tensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, + maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01.tensor, + nnXLen: nnXLen, + nnYLen: nnYLen) + + targetTensors = [ + policyHead.policyTensor, + policyHead.policyPassTensor, + valueHead.valueTensor, + valueHead.scoreValueTensor, + valueHead.ownershipTensor, + ] + } + + /// Applies the model to the given input data + public func apply( + input inputPointer: UnsafeMutablePointer, + inputGlobal inputGlobalPointer: UnsafeMutablePointer, + inputMeta inputMetaPointer: UnsafeMutablePointer, + policy: UnsafeMutablePointer, + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer, + batchSize: Int + ) { + let channelAxis = InputShape.getChannelAxis() + let numInputChannels = input.shape[channelAxis] + + let inputShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: numInputChannels, + nnYLen: nnYLen, + nnXLen: nnXLen) + + let inputDescriptor = MPSNDArrayDescriptor( + dataType: input.tensor.dataType, + shape: inputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(inputPointer) + + let numInputGlobalChannels = inputGlobal.shape[channelAxis] + + let inputGlobalShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: numInputGlobalChannels, + nnYLen: 1, + nnXLen: 1) + + let 
inputGlobalDescriptor = MPSNDArrayDescriptor( + dataType: inputGlobal.tensor.dataType, + shape: inputGlobalShape) + + let inputGlobalArray = MPSNDArray( + device: device, + descriptor: inputGlobalDescriptor) + + inputGlobalArray.writeBytes(inputGlobalPointer) + + let numInputMetaChannels = inputMeta.shape[channelAxis] + + let inputMetaShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: numInputMetaChannels, + nnYLen: 1, + nnXLen: 1) + + let inputMetaDescriptor = MPSNDArrayDescriptor( + dataType: inputMeta.tensor.dataType, + shape: inputMetaShape) + + let inputMetaArray = MPSNDArray( + device: device, + descriptor: inputMetaDescriptor) + + inputMetaArray.writeBytes(inputMetaPointer) + + let maskShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: 1, + nnYLen: nnYLen, + nnXLen: nnXLen) + + let maskDescriptor = MPSNDArrayDescriptor( + dataType: mask.tensor.dataType, + shape: maskShape) + + let maskArray = MPSNDArray( + device: device, + descriptor: maskDescriptor) + + var maskStrideArray = [ + MemoryLayout.size, + nnXLen.intValue * MemoryLayout.size, + nnYLen.intValue * nnXLen.intValue * MemoryLayout.size, + numInputChannels.intValue * nnYLen.intValue * nnXLen.intValue + * MemoryLayout.size, + ] + + maskArray.writeBytes(inputPointer, strideBytes: &maskStrideArray) + + let feeds = [ + input.tensor: MPSGraphTensorData(inputArray), + inputGlobal.tensor: MPSGraphTensorData(inputGlobalArray), + inputMeta.tensor: MPSGraphTensorData(inputMetaArray), + mask.tensor: MPSGraphTensorData(maskArray), + ] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: targetTensors, + targetOperations: nil) + + assert(fetch[policyHead.policyTensor] != nil) + assert(fetch[policyHead.policyPassTensor] != nil) + assert(fetch[valueHead.valueTensor] != nil) + assert(fetch[valueHead.scoreValueTensor] != nil) + assert(fetch[valueHead.ownershipTensor] != nil) + + 
fetch[policyHead.policyTensor]?.mpsndarray().readBytes(policy) + fetch[policyHead.policyPassTensor]?.mpsndarray().readBytes(policyPass) + fetch[valueHead.valueTensor]?.mpsndarray().readBytes(value) + fetch[valueHead.scoreValueTensor]?.mpsndarray().readBytes(scoreValue) + fetch[valueHead.ownershipTensor]?.mpsndarray().readBytes(ownership) + } +} From 8f24d1ab8f0208686c7e1f83b2393a63cc8dd1a2 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 5 Jan 2026 20:44:54 +0800 Subject: [PATCH 10/34] Optimize MPSGraph mask operations when requireExactNNLen is true MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When requireExactNNLen is true (all mask values are 1), skip unnecessary mask operations in MPSGraph layers: - BatchNormLayer: Skip output * maskTensor multiplication - GlobalPoolingLayer: Skip mask-1 trick for max pooling - MaskSumLayer and derived layers: Use precomputed constants instead of computing from mask tensor The optimization is enabled by passing requireExactNNLen to MPSGraphModelHandle, which propagates it through the layer hierarchy. 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.swift | 57 ++++++--- cpp/neuralnet/mpsgraphlayers.swift | 183 ++++++++++++++++++++++------- 2 files changed, 181 insertions(+), 59 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index 8c297aaca..bb8ee1a0e 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -460,7 +460,8 @@ public class MPSGraphModelHandle { public init?( modelDesc: SWModelDesc, nnXLen: Int32, - nnYLen: Int32 + nnYLen: Int32, + optimizeIdentityMask: Bool = false ) { guard let device = MTLCreateSystemDefaultDevice() else { printError("MPSGraph backend: Failed to create Metal device") @@ -506,17 +507,35 @@ public class MPSGraphModelHandle { nnXLen: nnXLenNS, nnYLen: nnYLenNS) - let maskSum = MaskSumLayer( - graph: graph, - maskTensor: mask.tensor) - - let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( - graph: graph, - maskSum: maskSum) - - let maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( - graph: graph, - maskSumSqrtS14M01: maskSumSqrtS14M01) + // Use constant tensors when mask is all 1s (requireExactNNLen=true) + let maskSum: MaskSumLayer + let maskSumSqrtS14M01: MaskSumSqrtS14M01Layer + let maskSumSqrtS14M01SquareS01: MaskSumSqrtS14M01SquareS01Layer + + if optimizeIdentityMask { + maskSum = MaskSumLayer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + } else { + maskSum = MaskSumLayer( + graph: graph, + maskTensor: mask.tensor) + maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( + graph: graph, + maskSum: maskSum) + maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( + graph: graph, + maskSumSqrtS14M01: maskSumSqrtS14M01) + } trunk = Trunk( 
graph: graph, @@ -528,7 +547,8 @@ public class MPSGraphModelHandle { maskSumTensor: maskSum.tensor, maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, nnXLen: nnXLenNS, - nnYLen: nnYLenNS) + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) policyHead = PolicyHead( graph: graph, @@ -538,7 +558,8 @@ public class MPSGraphModelHandle { maskSumTensor: maskSum.tensor, maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, nnXLen: nnXLenNS, - nnYLen: nnYLenNS) + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) valueHead = ValueHead( graph: graph, @@ -549,7 +570,8 @@ public class MPSGraphModelHandle { maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01.tensor, nnXLen: nnXLenNS, - nnYLen: nnYLenNS) + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) targetTensors = [ policyHead.policyTensor, @@ -559,7 +581,7 @@ public class MPSGraphModelHandle { valueHead.ownershipTensor, ] - printError("MPSGraph backend: Initialized on \(device.name)") + printError("MPSGraph backend: Initialized on \(device.name)\(optimizeIdentityMask ? 
" (mask optimized)" : "")") } /// Run inference on a batch using MPSGraph (GPU) @@ -834,7 +856,8 @@ public func createHybridComputeHandle( guard let mpsGraphHandle = MPSGraphModelHandle( modelDesc: modelDesc, nnXLen: context.nnXLen, - nnYLen: context.nnYLen + nnYLen: context.nnYLen, + optimizeIdentityMask: requireExactNNLen ) else { printError("Hybrid backend \(serverThreadIdx): Failed to create MPSGraph handle") return nil diff --git a/cpp/neuralnet/mpsgraphlayers.swift b/cpp/neuralnet/mpsgraphlayers.swift index 92e0edf24..fce460678 100644 --- a/cpp/neuralnet/mpsgraphlayers.swift +++ b/cpp/neuralnet/mpsgraphlayers.swift @@ -223,6 +223,23 @@ struct MaskSumLayer { assert(self.tensor.shape?.count == 4) } + + /// Optimized init for when mask is all 1s (requireExactNNLen=true) + /// Returns constant tensor with boardSize value + init( + graph: MPSGraph, + nnXLen: NSNumber, + nnYLen: NSNumber, + dataType: MPSDataType = .float32 + ) { + let boardSize = Double(nnXLen.intValue * nnYLen.intValue) + self.tensor = graph.constant( + boardSize, + shape: [1, 1, 1, 1], + dataType: dataType) + + assert(self.tensor.shape?.count == 4) + } } /// A structure that represents sqrt(maskSum) * 0.1 - 1.4 @@ -259,6 +276,24 @@ struct MaskSumSqrtS14M01Layer { assert(self.tensor.shape?.count == 4) } + + /// Optimized init for when mask is all 1s (requireExactNNLen=true) + /// Returns constant tensor: (sqrt(boardSize) - 14) * 0.1 + init( + graph: MPSGraph, + nnXLen: NSNumber, + nnYLen: NSNumber, + dataType: MPSDataType = .float32 + ) { + let boardSize = Double(nnXLen.intValue * nnYLen.intValue) + let value = (sqrt(boardSize) - 14.0) * 0.1 + self.tensor = graph.constant( + value, + shape: [1, 1, 1, 1], + dataType: dataType) + + assert(self.tensor.shape?.count == 4) + } } /// A structure for (sqrt(maskSum) * 0.1 - 1.4)^2 - 0.1 @@ -288,6 +323,25 @@ struct MaskSumSqrtS14M01SquareS01Layer { assert(self.tensor.shape?.count == 4) } + + /// Optimized init for when mask is all 1s 
(requireExactNNLen=true) + /// Returns constant tensor: ((sqrt(boardSize) - 14) * 0.1)^2 - 0.1 + init( + graph: MPSGraph, + nnXLen: NSNumber, + nnYLen: NSNumber, + dataType: MPSDataType = .float32 + ) { + let boardSize = Double(nnXLen.intValue * nnYLen.intValue) + let sqrtS14M01 = (sqrt(boardSize) - 14.0) * 0.1 + let value = sqrtS14M01 * sqrtS14M01 - 0.1 + self.tensor = graph.constant( + value, + shape: [1, 1, 1, 1], + dataType: dataType) + + assert(self.tensor.shape?.count == 4) + } } // MARK: - Layer Descriptors @@ -484,7 +538,8 @@ class BatchNormLayer { maskTensor: MPSGraphTensor, descriptor: SWBatchNormLayerDesc, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let scaleBiasShape = InputShape.create( batchSize: 1, @@ -520,10 +575,15 @@ class BatchNormLayer { biasTensor, name: nil) - resultTensor = graph.multiplication( - normalized, - maskTensor, - name: nil) + // Skip mask multiplication when all mask values are 1 + if optimizeIdentityMask { + resultTensor = normalized + } else { + resultTensor = graph.multiplication( + normalized, + maskTensor, + name: nil) + } assert(resultTensor.shape?.count == 4) } @@ -665,7 +725,8 @@ struct GlobalPoolingLayer { sourceTensor: MPSGraphTensor, maskTensor: MPSGraphTensor, maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor + maskSumSqrtS14M01Tensor: MPSGraphTensor, + optimizeIdentityMask: Bool = false ) { let hwAxes = InputShape.getHWAxes() let channelAxis = InputShape.getChannelAxis() @@ -682,14 +743,24 @@ struct GlobalPoolingLayer { maskSumSqrtS14M01Tensor, name: nil) - let oneTensor = graph.constant(1.0, dataType: sourceTensor.dataType) - let maskM1Tensor = graph.subtraction(maskTensor, oneTensor, name: nil) - let addition = graph.addition(sourceTensor, maskM1Tensor, name: nil) - - let maxTensor = graph.reductionMaximum( - with: addition, - axes: hwAxes, - name: nil) + let maxTensor: MPSGraphTensor + if optimizeIdentityMask { + // When all mask values 
are 1, directly compute max without mask adjustment + maxTensor = graph.reductionMaximum( + with: sourceTensor, + axes: hwAxes, + name: nil) + } else { + // Mask out invalid positions by subtracting 1 (making them very negative) + let oneTensor = graph.constant(1.0, dataType: sourceTensor.dataType) + let maskM1Tensor = graph.subtraction(maskTensor, oneTensor, name: nil) + let addition = graph.addition(sourceTensor, maskM1Tensor, name: nil) + + maxTensor = graph.reductionMaximum( + with: addition, + axes: hwAxes, + name: nil) + } resultTensor = graph.concatTensors( [ @@ -938,7 +1009,8 @@ class ResidualBlock { maskTensor: MPSGraphTensor, descriptor: SWResidualBlockDesc, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let preBN = BatchNormLayer( graph: graph, @@ -946,7 +1018,8 @@ class ResidualBlock { maskTensor: maskTensor, descriptor: descriptor.preBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let preActivation = ActivationLayer( graph: graph, @@ -966,7 +1039,8 @@ class ResidualBlock { maskTensor: maskTensor, descriptor: descriptor.midBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let midActivation = ActivationLayer( graph: graph, @@ -1001,7 +1075,8 @@ class GlobalPoolingResidualBlock { maskSumSqrtS14M01Tensor: MPSGraphTensor, descriptor: SWGlobalPoolingResidualBlockDesc, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let maskSum = MaskSumLayer(tensor: maskSumTensor) let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer(tensor: maskSumSqrtS14M01Tensor) @@ -1012,7 +1087,8 @@ class GlobalPoolingResidualBlock { maskTensor: maskTensor, descriptor: descriptor.preBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let preActivation = ActivationLayer( graph: graph, @@ -1039,7 +1115,8 @@ class GlobalPoolingResidualBlock { 
maskTensor: maskTensor, descriptor: descriptor.gpoolBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let gpoolActivation = ActivationLayer( graph: graph, @@ -1051,7 +1128,8 @@ class GlobalPoolingResidualBlock { sourceTensor: gpoolActivation.resultTensor, maskTensor: maskTensor, maskSumTensor: maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor) + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, + optimizeIdentityMask: optimizeIdentityMask) assert(gpoolConcat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) @@ -1074,7 +1152,8 @@ class GlobalPoolingResidualBlock { maskTensor: maskTensor, descriptor: descriptor.midBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let midActivation = ActivationLayer( graph: graph, @@ -1110,7 +1189,8 @@ struct BlockStack { _ blockDescriptors: [BlockDescriptor], _ index: Int, _ nnXLen: NSNumber, - _ nnYLen: NSNumber + _ nnYLen: NSNumber, + _ optimizeIdentityMask: Bool ) -> MPSGraphTensor { guard index < blockDescriptors.count else { return sourceTensor @@ -1129,7 +1209,8 @@ struct BlockStack { maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, descriptor: globalPoolingDescriptor, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) blockInput = globalPooling.resultTensor case let nestedBottleneckDescriptor as SWNestedBottleneckResidualBlockDesc: @@ -1141,7 +1222,8 @@ struct BlockStack { maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, descriptor: nestedBottleneckDescriptor, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) blockInput = nestedBottleneck.resultTensor case let residualBlockDescriptor as SWResidualBlockDesc: @@ -1151,7 +1233,8 @@ struct BlockStack { maskTensor: maskTensor, descriptor: residualBlockDescriptor, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: 
optimizeIdentityMask) blockInput = ordinary.resultTensor default: @@ -1167,7 +1250,8 @@ struct BlockStack { blockDescriptors, index + 1, nnXLen, - nnYLen) + nnYLen, + optimizeIdentityMask) } init( @@ -1178,7 +1262,8 @@ struct BlockStack { maskSumSqrtS14M01Tensor: MPSGraphTensor, blockDescriptors: [BlockDescriptor], nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { resultTensor = BlockStack.processBlockDescriptors( graph, @@ -1189,7 +1274,8 @@ struct BlockStack { blockDescriptors, 0, nnXLen, - nnYLen) + nnYLen, + optimizeIdentityMask) } } @@ -1205,7 +1291,8 @@ struct NestedBottleneckResidualBlock { maskSumSqrtS14M01Tensor: MPSGraphTensor, descriptor: SWNestedBottleneckResidualBlockDesc, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let preBN = BatchNormLayer( graph: graph, @@ -1213,7 +1300,8 @@ struct NestedBottleneckResidualBlock { maskTensor: maskTensor, descriptor: descriptor.preBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let preActivation = ActivationLayer( graph: graph, @@ -1235,7 +1323,8 @@ struct NestedBottleneckResidualBlock { maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, blockDescriptors: descriptor.blockDescriptors, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let postBN = BatchNormLayer( graph: graph, @@ -1243,7 +1332,8 @@ struct NestedBottleneckResidualBlock { maskTensor: maskTensor, descriptor: descriptor.postBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let postActivation = ActivationLayer( graph: graph, @@ -1495,7 +1585,8 @@ struct Trunk { maskSumTensor: MPSGraphTensor, maskSumSqrtS14M01Tensor: MPSGraphTensor, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let initialConv = ConvLayer( graph: graph, @@ -1534,7 +1625,8 @@ struct Trunk { 
maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, blockDescriptors: descriptor.blockDescriptors, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let trunkTipBN = BatchNormLayer( graph: graph, @@ -1542,7 +1634,8 @@ struct Trunk { maskTensor: maskTensor, descriptor: descriptor.trunkTipBN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let trunkTipActivation = ActivationLayer( graph: graph, @@ -1674,7 +1767,8 @@ struct PolicyHead { maskSumTensor: MPSGraphTensor, maskSumSqrtS14M01Tensor: MPSGraphTensor, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let p1Conv = ConvLayer( graph: graph, @@ -1696,7 +1790,8 @@ struct PolicyHead { maskTensor: maskTensor, descriptor: descriptor.g1BN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let g1Activation = ActivationLayer( graph: graph, @@ -1708,7 +1803,8 @@ struct PolicyHead { sourceTensor: g1Activation.resultTensor, maskTensor: maskTensor, maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor) + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, + optimizeIdentityMask: optimizeIdentityMask) assert(g1Concat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) @@ -1731,7 +1827,8 @@ struct PolicyHead { maskTensor: maskTensor, descriptor: descriptor.p1BN, nnXLen: nnXLen, - nnYLen: nnYLen) + nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let p1Activation = ActivationLayer( graph: graph, @@ -1876,7 +1973,8 @@ struct ValueHead { maskSumSqrtS14M01Tensor: MPSGraphTensor, maskSumSqrtS14M01SquareS01Tensor: MPSGraphTensor, nnXLen: NSNumber, - nnYLen: NSNumber + nnYLen: NSNumber, + optimizeIdentityMask: Bool = false ) { let v1Conv = ConvLayer( graph: graph, @@ -1891,7 +1989,8 @@ struct ValueHead { maskTensor: maskTensor, descriptor: descriptor.v1BN, nnXLen: nnXLen, - nnYLen: nnYLen) + 
nnYLen: nnYLen, + optimizeIdentityMask: optimizeIdentityMask) let v1Activation = ActivationLayer( graph: graph, From 270651bd10c6959e1f5529d1d4185f092c7e640d Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 5 Jan 2026 20:50:52 +0800 Subject: [PATCH 11/34] Remove unused maskSize variable in HybridComputeHandle MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.swift | 1 - 1 file changed, 1 deletion(-) diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index bb8ee1a0e..9b17b610d 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -745,7 +745,6 @@ public class HybridComputeHandle { let spatialSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numInputChannels let globalSize = coremlHandle.numInputGlobalChannels let metaSize = coremlHandle.numInputMetaChannels - let maskSize = Int(nnXLen) * Int(nnYLen) let policySize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numPolicyChannels let valueSize = coremlHandle.numValueChannels let scoreValueSize = coremlHandle.numScoreValueChannels From 31a36a310d2fc1029731dd626e1787048aff53bc Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Tue, 6 Jan 2026 07:40:03 +0800 Subject: [PATCH 12/34] Add CoreML backend build instructions to Compiling.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document the CoreML backend as an alternative to Metal for macOS, including Homebrew installation of the katagocoreml library dependency. 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- Compiling.md | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/Compiling.md b/Compiling.md index 648fea548..80cd7df36 100644 --- a/Compiling.md +++ b/Compiling.md @@ -118,7 +118,7 @@ As also mentioned in the instructions below but repeated here for visibility, if * If using OpenCL, you will want to verify that KataGo is picking up the correct device (e.g. some systems may have both an Intel CPU OpenCL and GPU OpenCL, if KataGo appears to pick the wrong one, you can correct this by specifying `openclGpuToUse` in `configs/gtp_example.cfg`). ## MacOS - * TLDR: + * TLDR (Metal backend - recommended for most users): ``` git clone https://github.com/lightvector/KataGo.git cd KataGo/cpp @@ -127,11 +127,23 @@ As also mentioned in the instructions below but repeated here for visibility, if cmake -G Ninja -DUSE_BACKEND=METAL -DBUILD_DISTRIBUTED=1 ninja ``` + * TLDR (CoreML backend - hybrid CPU+GPU+Neural Engine for maximum throughput): + ``` + # First, install the katagocoreml library via Homebrew + brew tap chinchangyang/katagocoreml-cpp + brew install katagocoreml + + git clone https://github.com/lightvector/KataGo.git + cd KataGo/cpp + cmake -G Ninja -DUSE_BACKEND=COREML -DBUILD_DISTRIBUTED=1 + ninja + ``` * Requirements * [Homebrew](https://brew.sh): `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"` * CMake with a minimum version of 3.18.2: `brew install cmake`. * AppleClang and Swift compilers: `xcode-select --install`. - * If using the Metal backend, [Ninja](https://ninja-build.org): `brew install ninja` + * If using the Metal or CoreML backend, [Ninja](https://ninja-build.org): `brew install ninja` + * If using the CoreML backend, katagocoreml library: `brew tap chinchangyang/katagocoreml-cpp && brew install katagocoreml` * libzip: `brew install libzip`. 
* If you want to do self-play training and research, probably Google perftools `brew install gperftools` for TCMalloc or some other better malloc implementation. For unknown reasons, the allocation pattern in self-play with large numbers of threads and parallel games causes a lot of memory fragmentation under glibc malloc that will eventually run your machine out of memory, but better mallocs handle it fine. * If compiling to contribute to public distributed training runs, OpenSSL is required (`brew install openssl`). @@ -139,14 +151,14 @@ As also mentioned in the instructions below but repeated here for visibility, if * `git clone https://github.com/lightvector/KataGo.git` * Compile using CMake and make in the cpp directory: * `cd KataGo/cpp` - * `cmake . -G Ninja -DUSE_BACKEND=METAL` or `cmake . -DUSE_BACKEND=OPENCL` or `cmake . -DUSE_BACKEND=EIGEN` depending on which backend you want. + * `cmake . -G Ninja -DUSE_BACKEND=METAL` or `cmake . -G Ninja -DUSE_BACKEND=COREML` or `cmake . -DUSE_BACKEND=OPENCL` or `cmake . -DUSE_BACKEND=EIGEN` depending on which backend you want. * Specify also `-DUSE_TCMALLOC=1` if using TCMalloc. * Compiling will also call git commands to embed the git hash into the compiled executable, specify also `-DNO_GIT_REVISION=1` to disable it if this is causing issues for you. * Specify `-DUSE_AVX2=1` to also compile Eigen with AVX2 and FMA support, which will make it incompatible with old CPUs but much faster. Intel-based Macs with new processors support AVX2, but Apple Silicon Macs do not support AVX2 natively. (If you want to go further, you can also add `-DCMAKE_CXX_FLAGS='-march=native'` which will specialize to precisely your machine's CPU, but the exe might not run on other machines at all). * Specify `-DBUILD_DISTRIBUTED=1` to compile with support for contributing data to public distributed training runs. 
* If building distributed, you will also need to build with Git revision support, including building within a clone of the repo, as opposed to merely an unzipped copy of its source. * Only builds from specific tagged versions or branches can contribute, in particular, instead of the `master` branch, use either the latest [release](https://github.com/lightvector/KataGo/releases) tag or the tip of the `stable` branch. To minimize the chance of any data incompatibilities or bugs, please do NOT attempt to contribute with custom changes or circumvent these limitations. - * `ninja` for Metal backend, or `make` for other backends. + * `ninja` for Metal or CoreML backend, or `make` for other backends. * Done! You should now have a compiled `katago` executable in your working directory. * Pre-trained neural nets are available at [the main training website](https://katagotraining.org/). * You will probably want to edit `configs/gtp_example.cfg` (see "Tuning for Performance" above). From f952519e104ab0c643da347f728711182393a3f3 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Tue, 6 Jan 2026 17:31:47 +0800 Subject: [PATCH 13/34] Add CoreML backend CI job to GitHub Actions workflow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- .github/workflows/build.yml | 50 +++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 708479f79..fa9fa4daf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -97,6 +97,56 @@ jobs: name: katago-macos-opencl path: cpp/katago + build-macos-coreml: + runs-on: macos-latest + permissions: + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install dependencies + run: | + brew install ninja zlib libzip + brew 
tap chinchangyang/katagocoreml-cpp + brew install katagocoreml + + - name: Cache CMake build + uses: actions/cache@v4 + with: + path: | + cpp/CMakeCache.txt + cpp/CMakeFiles + cpp/build.ninja + cpp/.ninja_deps + cpp/.ninja_log + key: ${{ runner.os }}-cmake-coreml-${{ hashFiles('**/CMakeLists.txt') }} + restore-keys: | + ${{ runner.os }}-cmake-coreml- + + - name: Configure CMake + working-directory: cpp + run: | + cmake . -G Ninja -DUSE_BACKEND=COREML -DCMAKE_BUILD_TYPE=Release + + - name: Build + working-directory: cpp + run: | + ninja + + - name: Run tests + working-directory: cpp + run: | + ./katago runtests + + - name: Upload artifact + if: github.event_name == 'push' && github.ref == 'refs/heads/master' + uses: actions/upload-artifact@v4 + with: + name: katago-macos-coreml + path: cpp/katago + build-windows: runs-on: windows-latest permissions: From bb69d0db21a25c1df3b2b9ed2b0b9618fea6cd6c Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Tue, 20 Jan 2026 19:52:23 +0800 Subject: [PATCH 14/34] Simplify CoreML model loading with dynamic batch size support - Add runtime batch size support (1 to maxBatchSize) with batch size included in model cache key for proper cache invalidation - Simplify model loading: convert to temp .mlpackage, load via MLModel.compileModel(), then delete immediately (CoreML caches internally) - Remove ~400 lines of complex manual cache management code - Ensure temp files are cleaned up on conversion/compile failures using defer, with warning logged if cleanup itself fails Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.cpp | 153 ++++++++++++----------------- cpp/neuralnet/coremlbackend.h | 4 +- cpp/neuralnet/coremlbackend.swift | 16 ++- cpp/neuralnet/mpsgraphlayers.swift | 5 + 4 files changed, 85 insertions(+), 93 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index faf38363b..270465962 100644 --- 
a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -7,11 +7,11 @@ #include "../neuralnet/coremlbackend.h" #include +#include #include -#include -#include -#include -#include +#include +#include +#include // For getpid() using namespace std; @@ -19,106 +19,74 @@ using namespace std; // CoreML Model Conversion - Native C++ using katagocoreml library //------------------------------------------------------------------------------ +namespace gfs = ghc::filesystem; + namespace CoreMLConversion { -// Get cache directory: ~/Documents/KataGo/CoreMLModels/ -static string getCacheDirectory() { - const char* homeDir = getenv("HOME"); - if(homeDir == nullptr) { - struct passwd* pw = getpwuid(getuid()); - homeDir = pw ? pw->pw_dir : "/tmp"; +// Get temp directory for model conversion +static string getTempDirectory() { + gfs::path tempDir = gfs::temp_directory_path() / "katago_coreml"; + std::error_code ec; + gfs::create_directories(tempDir, ec); + if(ec) { + throw runtime_error("Failed to create temp directory: " + ec.message()); } - return string(homeDir) + "/Documents/KataGo/CoreMLModels"; + return tempDir.string(); } -// Create directory if it doesn't exist -static bool createDirectoryIfNeeded(const string& path) { - struct stat st; - if(stat(path.c_str(), &st) == 0) { - return S_ISDIR(st.st_mode); - } - // Create parent directories recursively - size_t pos = path.find_last_of('/'); - if(pos != string::npos && pos > 0) { - createDirectoryIfNeeded(path.substr(0, pos)); - } - return mkdir(path.c_str(), 0755) == 0; +// Generate unique temporary path for model conversion +static string generateTempPath(int serverThreadIdx) { + auto now = chrono::steady_clock::now().time_since_epoch().count(); + return getTempDirectory() + "/model_" + to_string(getpid()) + "_" + + to_string(serverThreadIdx) + "_" + to_string(now) + ".mlpackage"; } -// Check if file/directory exists -static bool fileExists(const string& path) { - struct stat st; - return stat(path.c_str(), 
&st) == 0; -} - -// Generate cache key with _native suffix to distinguish from Python-converted models -static string generateCacheKey( - const string& modelSHA256, - int boardX, - int boardY, - bool useFP16, - bool optimizeMask -) { - string precisionSuffix = useFP16 ? "fp16" : "fp32"; - string maskSuffix = optimizeMask ? "nomask" : "mask"; - return modelSHA256 + "_" + to_string(boardX) + "x" + to_string(boardY) + - "_" + precisionSuffix + "_" + maskSuffix + "_native"; -} +// CoreML model metadata constants +static const string COREML_MODEL_AUTHOR = "KataGo"; +static const string COREML_MODEL_LICENSE = "See original model file for license terms"; -// Convert KataGo model to CoreML using native katagocoreml library -static void convertModelToCoreML( +// Convert KataGo model to CoreML in temp directory, returns path to .mlpackage +// The caller (Swift side) is responsible for deleting the temp file after loading +static string convertModelToTemp( const string& modelPath, - const string& outputPath, - int boardXSize, - int boardYSize, - bool useFP16, - bool optimizeIdentityMask -) { - katagocoreml::ConversionOptions opts; - opts.board_x_size = boardXSize; - opts.board_y_size = boardYSize; - opts.compute_precision = useFP16 ? 
"FLOAT16" : "FLOAT32"; - opts.optimize_identity_mask = optimizeIdentityMask; - opts.specification_version = 8; // iOS 17+ / macOS 14+ - - katagocoreml::KataGoConverter::convert(modelPath, outputPath, opts); -} - -// Ensure model is converted and cached, returns path to .mlpackage -static string ensureModelConverted( - const string& modelPath, - const string& modelSHA256, int boardX, int boardY, bool useFP16, bool optimizeMask, + int maxBatchSize, int serverThreadIdx ) { - string cacheDir = getCacheDirectory(); - string cacheKey = generateCacheKey(modelSHA256, boardX, boardY, useFP16, optimizeMask); - string mlpackagePath = cacheDir + "/" + cacheKey + ".mlpackage"; - - // Check if already cached - if(fileExists(mlpackagePath)) { - cerr << "Core ML backend " << serverThreadIdx << ": Using cached model at " << mlpackagePath << endl; - return mlpackagePath; - } + // maxBatchSize is validated upstream: cfg.getInt("nnMaxBatchSize", 1, 65536) in setup.cpp + // and NNEvaluator constructor throws if maxBatchSize <= 0. Assert for defensive documentation. + assert(maxBatchSize >= 1); - // Create cache directory if needed - if(!createDirectoryIfNeeded(cacheDir)) { - throw runtime_error("Failed to create cache directory: " + cacheDir); - } + string tempPath = generateTempPath(serverThreadIdx); + cerr << "Core ML backend " << serverThreadIdx << ": Converting model to " << tempPath << endl; + + katagocoreml::ConversionOptions opts; + opts.board_x_size = boardX; + opts.board_y_size = boardY; + opts.compute_precision = useFP16 ? 
"FLOAT16" : "FLOAT32"; + opts.optimize_identity_mask = optimizeMask; + opts.min_batch_size = 1; + opts.max_batch_size = maxBatchSize; + opts.author = COREML_MODEL_AUTHOR; + opts.license = COREML_MODEL_LICENSE; - // Convert model - cerr << "Core ML backend " << serverThreadIdx << ": Converting model to " << mlpackagePath << endl; try { - convertModelToCoreML(modelPath, mlpackagePath, boardX, boardY, useFP16, optimizeMask); - cerr << "Core ML backend " << serverThreadIdx << ": Conversion completed successfully" << endl; + katagocoreml::KataGoConverter::convert(modelPath, tempPath, opts); } catch(const exception& e) { + // Clean up partial conversion on failure + std::error_code ec; + gfs::remove_all(tempPath, ec); + if(ec) { + cerr << "Core ML backend " << serverThreadIdx << ": Warning: Failed to clean up partial conversion at " << tempPath << ": " << ec.message() << endl; + } throw runtime_error(string("Core ML model conversion failed: ") + e.what()); } - return mlpackagePath; + cerr << "Core ML backend " << serverThreadIdx << ": Conversion completed" << endl; + return tempPath; } } // namespace CoreMLConversion @@ -383,6 +351,7 @@ SWModelDesc modelDescToSwift(const ModelDesc* modelDesc) { modelDesc->numValueChannels, modelDesc->numScoreValueChannels, modelDesc->numOwnershipChannels, + modelDesc->numPolicyChannels, trunkDescToSwift(&modelDesc->trunk), policyHeadDescToSwift(&modelDesc->policyHead), valueHeadDescToSwift(&modelDesc->valueHead)); @@ -408,7 +377,7 @@ void NeuralNet::globalInitialize() { } void NeuralNet::globalCleanup() { - // No global cleanup needed for Core ML + // No cleanup needed - temp files are deleted immediately after loading } LoadedModel* NeuralNet::loadModelFile(const string& file, const string& expectedSha256) { @@ -477,6 +446,7 @@ static swift::Optional convertAndCreateHybrid ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, + int maxBatchSize, int serverThreadIdx ) { auto coremlContext = 
context->coremlContext; @@ -485,14 +455,15 @@ static swift::Optional convertAndCreateHybrid bool useFP16 = (context->useFP16Mode != enabled_t::False); bool optimizeMask = requireExactNNLen; - // Convert model to CoreML format using native katagocoreml library - string coremlModelPath = CoreMLConversion::ensureModelConverted( + // Convert model to CoreML format in temp directory + // The Swift side will delete the temp file after loading + string coremlModelPath = CoreMLConversion::convertModelToTemp( loadedModel->modelPath, - loadedModel->modelDesc.sha256, nnXLen, nnYLen, useFP16, optimizeMask, + maxBatchSize, serverThreadIdx ); @@ -508,7 +479,7 @@ static swift::Optional convertAndCreateHybrid loadedModel->modelDesc.numInputChannels, loadedModel->modelDesc.numInputGlobalChannels, loadedModel->modelDesc.numInputMetaChannels, - loadedModel->modelDesc.policyHead.p2Conv.outChannels, + loadedModel->modelDesc.numPolicyChannels, loadedModel->modelDesc.numValueChannels, loadedModel->modelDesc.numScoreValueChannels, loadedModel->modelDesc.numOwnershipChannels, @@ -522,8 +493,9 @@ ComputeHandle::ComputeHandle( bool inputsUseNHWC, int gpuIdx, int serverThreadIdx, - bool requireExactNNLen): -hybridHandle(convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, serverThreadIdx)) { + bool requireExactNNLen, + int maxBatchSize): +hybridHandle(convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { const ModelDesc* modelDesc = &loadedModel->modelDesc; auto coremlContext = context->coremlContext; @@ -551,14 +523,13 @@ ComputeHandle* NeuralNet::createComputeHandle( int serverThreadIdx) { (void)logger; - (void)maxBatchSize; int gpuIdx = (gpuIdxForThisThread == -1) ? 
0 : gpuIdxForThisThread; ComputeHandle* handle = nullptr; { lock_guard lock(computeHandleMutex); - handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx, requireExactNNLen); + handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx, requireExactNNLen, maxBatchSize); } return handle; diff --git a/cpp/neuralnet/coremlbackend.h b/cpp/neuralnet/coremlbackend.h index af52762cb..28fa3ba1f 100644 --- a/cpp/neuralnet/coremlbackend.h +++ b/cpp/neuralnet/coremlbackend.h @@ -180,6 +180,7 @@ struct ComputeHandle { * @param gpuIdx The index of the GPU to use. * @param serverThreadIdx The index of the server thread. * @param requireExactNNLen Whether exact NN length is required. + * @param maxBatchSize Maximum batch size for dynamic batch support. */ ComputeHandle( ComputeContext* context, @@ -187,7 +188,8 @@ struct ComputeHandle { bool inputsUseNHWC, int gpuIdx, int serverThreadIdx, - bool requireExactNNLen); + bool requireExactNNLen, + int maxBatchSize); ~ComputeHandle(); ComputeHandle() = delete; diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index 9b17b610d..fbcb8c785 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -329,6 +329,17 @@ public class CoreMLComputeHandle { } } +/// Delete the source .mlpackage after compilation +/// CoreML caches the compiled model, so the source is no longer needed +private func deleteSourceModel(at url: URL, serverThreadIdx: Int) { + do { + try FileManager.default.removeItem(at: url) + printError("Core ML backend \(serverThreadIdx): Deleted temp model") + } catch { + printError("Core ML backend \(serverThreadIdx): Warning: Failed to delete temp model: \(error)") + } +} + /// Create compute handle - loads pre-converted Core ML model /// Model conversion is now handled in C++ using the native katagocoreml library public func createCoreMLComputeHandle( @@ -348,6 +359,9 @@ public func 
createCoreMLComputeHandle( let optimizeMask = requireExactNNLen // When true: skips internal mask operations (~6.5% speedup) let mlpackagePath = URL(fileURLWithPath: coremlModelPath) + // Ensure temp file is deleted regardless of success/failure + defer { deleteSourceModel(at: mlpackagePath, serverThreadIdx: serverThreadIdx) } + // Load Core ML model (already converted by C++ katagocoreml library) do { let config = MLModelConfiguration() @@ -480,7 +494,7 @@ public class MPSGraphModelHandle { self.numInputChannels = modelDesc.numInputChannels.intValue self.numInputGlobalChannels = modelDesc.numInputGlobalChannels.intValue self.numInputMetaChannels = modelDesc.numInputMetaChannels.intValue - self.numPolicyChannels = 2 // Policy has 2 channels + self.numPolicyChannels = modelDesc.numPolicyChannels.intValue self.numValueChannels = modelDesc.numValueChannels.intValue self.numScoreValueChannels = modelDesc.numScoreValueChannels.intValue self.numOwnershipChannels = modelDesc.numOwnershipChannels.intValue diff --git a/cpp/neuralnet/mpsgraphlayers.swift b/cpp/neuralnet/mpsgraphlayers.swift index fce460678..e99107438 100644 --- a/cpp/neuralnet/mpsgraphlayers.swift +++ b/cpp/neuralnet/mpsgraphlayers.swift @@ -2071,6 +2071,7 @@ public struct SWModelDesc { let numValueChannels: NSNumber let numScoreValueChannels: NSNumber let numOwnershipChannels: NSNumber + let numPolicyChannels: NSNumber let trunk: SWTrunkDesc let policyHead: SWPolicyHeadDesc let valueHead: SWValueHeadDesc @@ -2084,6 +2085,7 @@ public struct SWModelDesc { numValueChannels: NSNumber, numScoreValueChannels: NSNumber, numOwnershipChannels: NSNumber, + numPolicyChannels: NSNumber, trunk: SWTrunkDesc, policyHead: SWPolicyHeadDesc, valueHead: SWValueHeadDesc @@ -2096,6 +2098,7 @@ public struct SWModelDesc { self.numValueChannels = numValueChannels self.numScoreValueChannels = numScoreValueChannels self.numOwnershipChannels = numOwnershipChannels + self.numPolicyChannels = numPolicyChannels self.trunk = trunk 
self.policyHead = policyHead self.valueHead = valueHead @@ -2111,6 +2114,7 @@ public func createSWModelDesc( numValueChannels: Int32, numScoreValueChannels: Int32, numOwnershipChannels: Int32, + numPolicyChannels: Int32, trunk: SWTrunkDesc, policyHead: SWPolicyHeadDesc, valueHead: SWValueHeadDesc @@ -2124,6 +2128,7 @@ public func createSWModelDesc( numValueChannels: numValueChannels as NSNumber, numScoreValueChannels: numScoreValueChannels as NSNumber, numOwnershipChannels: numOwnershipChannels as NSNumber, + numPolicyChannels: numPolicyChannels as NSNumber, trunk: trunk, policyHead: policyHead, valueHead: valueHead) From e9140624ce0d6b44304755084a4599c25bf5ba6d Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Tue, 20 Jan 2026 23:00:00 +0800 Subject: [PATCH 15/34] Add FP32 GPU-only mode using MPSGraph to bypass CoreML converter When useFP16=false, the CoreML CPU+ANE path runs extremely slowly in FP32 mode. This adds a GPU-only execution path using MPSGraph that bypasses CoreML model conversion entirely, providing much faster FP32 inference. Changes: - Add createMPSGraphOnlyHandle() function in Swift for direct GPU-only handle creation without CoreML conversion - Add mpsGraphOnlyHandle field to ComputeHandle for FP32 mode - Add conditional helper functions to create appropriate handle based on precision mode (hybridHandle for FP16, mpsGraphOnlyHandle for FP32) - Modify getCoreMLOutput() to dispatch to correct handle - Add assertion enforcing mutual exclusivity of handles - Standardize logging to use "Core ML backend X:" prefix throughout FP16 mode continues to use hybrid CoreML+MPSGraph execution as before. 
Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.cpp | 99 +++++++++++++++++++++++++------ cpp/neuralnet/coremlbackend.h | 6 ++ cpp/neuralnet/coremlbackend.swift | 28 ++++++++- 3 files changed, 112 insertions(+), 21 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index 270465962..300be229e 100644 --- a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -487,6 +487,50 @@ static swift::Optional convertAndCreateHybrid ); } +// Helper function to create hybrid handle if FP16 mode, otherwise returns none +static swift::Optional createHybridHandleIfNeeded( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int maxBatchSize, + int serverThreadIdx +) { + if(context->useFP16Mode == enabled_t::False) { + // FP32 mode - don't create hybrid handle + return swift::Optional::none(); + } + // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) + return convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); +} + +// Helper function to create MPSGraph-only handle if FP32 mode, otherwise returns none +// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE +static swift::Optional createMPSGraphHandleIfNeeded( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int serverThreadIdx +) { + if(context->useFP16Mode != enabled_t::False) { + // FP16 mode - don't create MPSGraph-only handle + return swift::Optional::none(); + } + // FP32 mode: Use MPSGraph only (GPU-only, skip slow FP32 CoreML) + cerr << "Core ML backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; + + // Convert model descriptor to Swift format for MPSGraph path + // Note: No CoreML conversion needed - MPSGraph reads weights directly + SWModelDesc swModelDesc = CoreMLProcess::modelDescToSwift(&loadedModel->modelDesc); + + // Create 
MPSGraph-only handle (GPU only) + return createMPSGraphOnlyHandle( + swModelDesc, + serverThreadIdx, + requireExactNNLen, + context->coremlContext + ); +} + ComputeHandle::ComputeHandle( ComputeContext* context, const LoadedModel* loadedModel, @@ -495,7 +539,11 @@ ComputeHandle::ComputeHandle( int serverThreadIdx, bool requireExactNNLen, int maxBatchSize): -hybridHandle(convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { +hybridHandle(createHybridHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)), +mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, serverThreadIdx)) { + assert(((hybridHandle && !mpsGraphOnlyHandle) || (!hybridHandle && mpsGraphOnlyHandle)) && + "Exactly one of hybridHandle or mpsGraphOnlyHandle must be valid"); + const ModelDesc* modelDesc = &loadedModel->modelDesc; auto coremlContext = context->coremlContext; @@ -904,23 +952,38 @@ void CoreMLProcess::getCoreMLOutput( CoreMLProcess::processRowData(row, gpuHandle, inputBuffers, inputBufs); } - auto hybridHandle = gpuHandle->hybridHandle; - assert(hybridHandle); - - // Call hybrid inference (CoreML on CPU+ANE, MPSGraph on GPU) - // Mask buffer has correct stride (singleMaskElts = H*W per batch element) - // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) - hybridHandle.get().apply( - inputBuffers->userInputBuffer, - inputBuffers->userInputGlobalBuffer, - inputBuffers->userInputMetaBuffer, - inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride - inputBuffers->policyResults, - inputBuffers->policyPassResults, - inputBuffers->valueResults, - inputBuffers->scoreValuesResults, - inputBuffers->ownershipResults, - batchSize); + // Dispatch to appropriate handle based on mode + if(gpuHandle->hybridHandle) { + // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) + // Mask buffer has correct stride 
(singleMaskElts = H*W per batch element) + // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) + gpuHandle->hybridHandle.get().apply( + inputBuffers->userInputBuffer, + inputBuffers->userInputGlobalBuffer, + inputBuffers->userInputMetaBuffer, + inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride + inputBuffers->policyResults, + inputBuffers->policyPassResults, + inputBuffers->valueResults, + inputBuffers->scoreValuesResults, + inputBuffers->ownershipResults, + batchSize); + } else if(gpuHandle->mpsGraphOnlyHandle) { + // FP32 mode: Use MPSGraph only (GPU-only) + // Note: MPSGraphModelHandle.apply() doesn't take maskInput - it extracts from spatial input + gpuHandle->mpsGraphOnlyHandle.get().apply( + inputBuffers->userInputBuffer, + inputBuffers->userInputGlobalBuffer, + inputBuffers->userInputMetaBuffer, + inputBuffers->policyResults, + inputBuffers->policyPassResults, + inputBuffers->valueResults, + inputBuffers->scoreValuesResults, + inputBuffers->ownershipResults, + batchSize); + } else { + throw runtime_error("Core ML backend: No valid compute handle available"); + } for(int row = 0; row < batchSize; row++) { CoreMLProcess::processRow(row, gpuHandle, inputBuffers, inputBufs, outputs); diff --git a/cpp/neuralnet/coremlbackend.h b/cpp/neuralnet/coremlbackend.h index 28fa3ba1f..301482440 100644 --- a/cpp/neuralnet/coremlbackend.h +++ b/cpp/neuralnet/coremlbackend.h @@ -172,6 +172,12 @@ struct ComputeHandle { */ swift::Optional hybridHandle; + /** + * @brief The MPSGraph-only handle instance from Swift (used for FP32 mode). + * This handle dispatches work only to GPU, avoiding slow FP32 CPU+ANE execution. + */ + swift::Optional mpsGraphOnlyHandle; + /** * @brief Construct a new ComputeHandle object. * @param context The ComputeContext object to use for computation. 
diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index fbcb8c785..c3f3e178a 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -861,7 +861,7 @@ public func createHybridComputeHandle( numOwnershipChannels: numOwnershipChannels, context: context ) else { - printError("Hybrid backend \(serverThreadIdx): Failed to create CoreML handle") + printError("Core ML backend \(serverThreadIdx): Failed to create CoreML handle") return nil } @@ -872,14 +872,36 @@ public func createHybridComputeHandle( nnYLen: context.nnYLen, optimizeIdentityMask: requireExactNNLen ) else { - printError("Hybrid backend \(serverThreadIdx): Failed to create MPSGraph handle") + printError("Core ML backend \(serverThreadIdx): Failed to create MPSGraph handle") return nil } - printError("Hybrid backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") + printError("Core ML backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") return HybridComputeHandle( coremlHandle: coremlHandle, mpsGraphHandle: mpsGraphHandle ) } + +/// Create a GPU-only compute handle using MPSGraph +/// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE +public func createMPSGraphOnlyHandle( + modelDesc: SWModelDesc, + serverThreadIdx: Int, + requireExactNNLen: Bool, + context: CoreMLComputeContext +) -> MPSGraphModelHandle? 
{ + guard let mpsGraphHandle = MPSGraphModelHandle( + modelDesc: modelDesc, + nnXLen: context.nnXLen, + nnYLen: context.nnYLen, + optimizeIdentityMask: requireExactNNLen + ) else { + printError("Core ML backend \(serverThreadIdx): Failed to create MPSGraph handle") + return nil + } + + printError("Core ML backend \(serverThreadIdx): Initialized MPSGraph GPU-only mode") + return mpsGraphHandle +} From 20ab59710f1488a989e65928b023ca3e9f7aae08 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 21 Jan 2026 14:47:49 +0800 Subject: [PATCH 16/34] Use MPSGraph-only mode when batch size is too small for hybrid split The hybrid execution mode splits batches between CoreML (CPU+ANE) and MPSGraph (GPU), requiring at least 2 samples (1 for each backend). When maxBatchSize < 2, fall back to MPSGraph-only which provides more stable latency and avoids CoreML dispatch overhead. This also enables explicit single-threaded GPU-only execution via nnMaxBatchSize=1, useful for debugging or deterministic behavior. Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.cpp | 45 ++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp index 300be229e..88208ad29 100644 --- a/cpp/neuralnet/coremlbackend.cpp +++ b/cpp/neuralnet/coremlbackend.cpp @@ -21,6 +21,13 @@ using namespace std; namespace gfs = ghc::filesystem; +// Minimum batch sizes for hybrid execution mode. +// Hybrid splits batches between CoreML (CPU+ANE) and MPSGraph (GPU). +// When batch is too small to split, prefer MPSGraph-only for stability: +// MPSGraph has more predictable latency and avoids CoreML dispatch overhead. 
+static constexpr int MIN_COREML_BATCH = 1; +static constexpr int MIN_MPSGRAPH_BATCH = 1; + namespace CoreMLConversion { // Get temp directory for model conversion @@ -487,7 +494,7 @@ static swift::Optional convertAndCreateHybrid ); } -// Helper function to create hybrid handle if FP16 mode, otherwise returns none +// Helper function to create hybrid handle if FP16 mode with sufficient batch size, otherwise returns none static swift::Optional createHybridHandleIfNeeded( ComputeContext* context, const LoadedModel* loadedModel, @@ -499,24 +506,44 @@ static swift::Optional createHybridHandleIfNe // FP32 mode - don't create hybrid handle return swift::Optional::none(); } - // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) + + // Hybrid mode splits batches: CoreML takes max(1, ...), MPSGraph takes remainder + // Minimum samples for meaningful split = 1 (CoreML) + 1 (MPSGraph) = 2 + // If batch can't be split, prefer MPSGraph-only for stability + if(maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH) { + return swift::Optional::none(); + } + + // FP16 mode with sufficient batch size: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) return convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); } -// Helper function to create MPSGraph-only handle if FP32 mode, otherwise returns none -// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE +// Helper function to create MPSGraph-only handle when needed +// Used when: (1) useFP16=false to avoid slow FP32 CoreML, or (2) batch too small for hybrid split static swift::Optional createMPSGraphHandleIfNeeded( ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, + int maxBatchSize, int serverThreadIdx ) { - if(context->useFP16Mode != enabled_t::False) { - // FP16 mode - don't create MPSGraph-only handle + // Use MPSGraph-only when: + // 1. FP32 mode (CoreML FP32 on CPU+ANE is slow), OR + // 2. 
Batch too small to split (hybrid requires minCoreML + minMPSGraph samples) + bool batchTooSmallForHybrid = maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH; + + if(context->useFP16Mode != enabled_t::False && !batchTooSmallForHybrid) { + // FP16 mode with sufficient batch - hybrid handle will be created instead return swift::Optional::none(); } - // FP32 mode: Use MPSGraph only (GPU-only, skip slow FP32 CoreML) - cerr << "Core ML backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; + + // Log reason for MPSGraph-only mode + if(batchTooSmallForHybrid) { + cerr << "Core ML backend " << serverThreadIdx << ": Batch size " << maxBatchSize + << " too small for hybrid split - using MPSGraph GPU-only" << endl; + } else { + cerr << "Core ML backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; + } // Convert model descriptor to Swift format for MPSGraph path // Note: No CoreML conversion needed - MPSGraph reads weights directly @@ -540,7 +567,7 @@ ComputeHandle::ComputeHandle( bool requireExactNNLen, int maxBatchSize): hybridHandle(createHybridHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)), -mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, serverThreadIdx)) { +mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { assert(((hybridHandle && !mpsGraphOnlyHandle) || (!hybridHandle && mpsGraphOnlyHandle)) && "Exactly one of hybridHandle or mpsGraphOnlyHandle must be valid"); From dad0daca51f726a3113b1f2fbb3a9447dd10aa07 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Wed, 21 Jan 2026 23:35:58 +0800 Subject: [PATCH 17/34] Improve ThroughputTracker for selfplay workloads Optimize hybrid dispatch convergence with adaptive EMA parameters: - Lower alpha (0.15) and 
warm-start ratio (0.47) for faster convergence - Adaptive warmup phase with variance-based transition - Remove unnecessary NSLock (thread-safe by design via single-owner access) - Add diagnostic logging via KATAGO_HYBRID_DIAG environment variable The thread safety is ensured without locks because: 1. Each server thread owns its own ThroughputTracker instance 2. Concurrent queue access is to disjoint fields only 3. group.wait() provides sequential barrier before shared reads Co-Authored-By: Claude Opus 4.5 --- cpp/neuralnet/coremlbackend.swift | 173 ++++++++++++++++++++++++++---- 1 file changed, 150 insertions(+), 23 deletions(-) diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift index c3f3e178a..95200a614 100644 --- a/cpp/neuralnet/coremlbackend.swift +++ b/cpp/neuralnet/coremlbackend.swift @@ -401,46 +401,137 @@ public func printCoreMLDevices() { // MARK: - Throughput Tracker for Adaptive Batch Sizing -/// Tracks throughput for CoreML and MPSGraph paths to adaptively adjust batch split ratio +/// Tracks throughput for CoreML and MPSGraph paths to adaptively adjust batch split ratio. +/// +/// # Thread Safety +/// +/// This class is thread-safe by design without requiring explicit locks: +/// +/// 1. **Single-Owner Access**: Each server thread owns its own `ComputeHandle` → +/// `HybridComputeHandle` → `ThroughputTracker` instance. There is no sharing +/// of `ThroughputTracker` instances between server threads. +/// +/// 2. **Disjoint Field Access**: Within a single `HybridComputeHandle.apply()` call, +/// concurrent dispatch queues access disjoint fields: +/// - `coremlQueue.async` calls `updateCoreML()` → writes `coreMLSamplesPerSec`, `totalCoreMLSamples` +/// - `mpsGraphQueue.async` calls `updateMPSGraph()` → writes `mpsGraphSamplesPerSec`, `totalMPSGraphSamples` +/// +/// Both read `warmupComplete`, `stableAlpha`, and `warmupAlpha`, but these are either +/// `let` constants or only written sequentially after `group.wait()`. 
+/// +/// 3. **Sequential Barrier**: `group.wait()` in `apply()` ensures all concurrent throughput +/// updates complete before `recordBatch()`, `shouldLogAndMark()`, or `getDiagnosticStats()` +/// are called. These methods run sequentially on the calling thread. +/// +/// Because of these invariants, no locks are needed. Removing `NSLock` was intentional +/// as it was unnecessary overhead given the access patterns above. public class ThroughputTracker { - private var coreMLSamplesPerSec: Double = 1.0 + private var coreMLSamplesPerSec: Double = 0.9 // Warm-start: initial ratio ~0.47 (closer to optimal ~0.45) private var mpsGraphSamplesPerSec: Double = 1.0 - private let alpha: Double = 0.3 // EMA smoothing factor (higher = faster adaptation) - private let lock = NSLock() - - /// Update CoreML throughput measurement + private let alpha: Double = 0.15 // EMA smoothing factor (lower = more stable for noisy workloads) + + // Diagnostic fields + private var batchCount: Int = 0 + private var totalCoreMLSamples: Int = 0 + private var totalMPSGraphSamples: Int = 0 + private var ratioHistory: [Float] = [] + private let maxHistorySize = 100 // Keep last 100 ratios for analysis + private var lastLogBatchCount: Int = 0 + private let logInterval: Int = 50 // Log every N batches + + // Adaptive alpha parameters + private var warmupComplete: Bool = false + private let warmupAlpha: Double = 0.25 // Faster adaptation during warmup + private let stableAlpha: Double = 0.10 // Slower adaptation after convergence + private let warmupBatches: Int = 100 // Min batches before checking warmup transition + private let warmupVarianceThreshold: Double = 0.005 // Variance threshold for warmup completion + + /// Update CoreML throughput measurement with adaptive alpha public func updateCoreML(samples: Int, duration: TimeInterval) { guard duration > 0, samples > 0 else { return } let newRate = Double(samples) / duration - lock.lock() - coreMLSamplesPerSec = alpha * newRate + (1 - alpha) * 
coreMLSamplesPerSec - lock.unlock() + let effectiveAlpha = warmupComplete ? stableAlpha : warmupAlpha + coreMLSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * coreMLSamplesPerSec + totalCoreMLSamples += samples } - /// Update MPSGraph throughput measurement + /// Update MPSGraph throughput measurement with adaptive alpha public func updateMPSGraph(samples: Int, duration: TimeInterval) { guard duration > 0, samples > 0 else { return } let newRate = Double(samples) / duration - lock.lock() - mpsGraphSamplesPerSec = alpha * newRate + (1 - alpha) * mpsGraphSamplesPerSec - lock.unlock() + let effectiveAlpha = warmupComplete ? stableAlpha : warmupAlpha + mpsGraphSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * mpsGraphSamplesPerSec + totalMPSGraphSamples += samples } /// Get optimal CoreML ratio (0.0 to 1.0) based on measured throughput public func getOptimalCoreMLRatio() -> Float { - lock.lock() let total = coreMLSamplesPerSec + mpsGraphSamplesPerSec - let ratio = total > 0 ? Float(coreMLSamplesPerSec / total) : 0.5 - lock.unlock() - return ratio + return total > 0 ? 
Float(coreMLSamplesPerSec / total) : 0.5 } /// Get current throughput stats for logging public func getStats() -> (coreML: Double, mpsGraph: Double, ratio: Float) { - lock.lock() - let stats = (coreMLSamplesPerSec, mpsGraphSamplesPerSec, getOptimalCoreMLRatio()) - lock.unlock() - return stats + return (coreMLSamplesPerSec, mpsGraphSamplesPerSec, getOptimalCoreMLRatio()) + } + + /// Record a batch for diagnostics (call after each apply) + public func recordBatch(ratio: Float) { + batchCount += 1 + if ratioHistory.count >= maxHistorySize { + ratioHistory.removeFirst() + } + ratioHistory.append(ratio) + // Check warmup transition + if !warmupComplete && batchCount >= warmupBatches && computeRatioVariance() < Float(warmupVarianceThreshold) { + warmupComplete = true + } + } + + /// Check if logging should occur this batch, and if so, mark as logged + /// Returns true if logging should occur (atomically checks and marks) + public func shouldLogAndMark() -> Bool { + if batchCount - lastLogBatchCount >= logInterval { + lastLogBatchCount = batchCount + return true + } + return false + } + + /// Get diagnostic stats for logging + public func getDiagnosticStats() -> ( + batchCount: Int, + coreMLSamplesPerSec: Double, + mpsGraphSamplesPerSec: Double, + ratio: Float, + totalCoreMLSamples: Int, + totalMPSGraphSamples: Int, + ratioVariance: Float + ) { + return ( + batchCount, + coreMLSamplesPerSec, + mpsGraphSamplesPerSec, + getOptimalCoreMLRatio(), + totalCoreMLSamples, + totalMPSGraphSamples, + computeRatioVariance() + ) + } + + /// Compute variance of recent ratios + private func computeRatioVariance() -> Float { + guard ratioHistory.count >= 10 else { return 0.0 } + let recentRatios = Array(ratioHistory.suffix(20)) + let mean = recentRatios.reduce(0.0, +) / Float(recentRatios.count) + let variance = recentRatios.map { ($0 - mean) * ($0 - mean) }.reduce(0.0, +) / Float(recentRatios.count) + return variance + } + + /// Check if ratio has converged (variance < threshold) + 
public func hasConverged(threshold: Float = 0.001) -> Bool { + let variance = computeRatioVariance() + return ratioHistory.count >= 20 && variance < threshold } } @@ -714,6 +805,14 @@ public class MPSGraphModelHandle { // MARK: - Hybrid Compute Handle +/// Global flag to enable/disable diagnostic logging (set via environment variable) +private let diagnosticLoggingEnabled: Bool = { + if let envValue = ProcessInfo.processInfo.environment["KATAGO_HYBRID_DIAG"] { + return envValue.lowercased() == "1" || envValue.lowercased() == "true" + } + return false +}() + /// Hybrid compute handle that dispatches to both CoreML (CPU+ANE) and MPSGraph (GPU) public class HybridComputeHandle { let coremlHandle: CoreMLComputeHandle @@ -723,13 +822,16 @@ public class HybridComputeHandle { let mpsGraphQueue: DispatchQueue let nnXLen: Int32 let nnYLen: Int32 + let serverThreadIdx: Int public init( coremlHandle: CoreMLComputeHandle, - mpsGraphHandle: MPSGraphModelHandle + mpsGraphHandle: MPSGraphModelHandle, + serverThreadIdx: Int = 0 ) { self.coremlHandle = coremlHandle self.mpsGraphHandle = mpsGraphHandle + self.serverThreadIdx = serverThreadIdx self.throughputTracker = ThroughputTracker() self.coremlQueue = DispatchQueue(label: "com.katago.coreml", qos: .userInitiated) self.mpsGraphQueue = DispatchQueue(label: "com.katago.mpsgraph", qos: .userInitiated) @@ -828,6 +930,25 @@ public class HybridComputeHandle { // Wait for both paths to complete group.wait() + + // Record batch for diagnostics + throughputTracker.recordBatch(ratio: ratio) + + // Periodic diagnostic logging + if diagnosticLoggingEnabled && throughputTracker.shouldLogAndMark() { + let stats = throughputTracker.getDiagnosticStats() + let converged = throughputTracker.hasConverged() + print(String(format: "[HybridDiag T%d] batch=%d ratio=%.3f coreml=%.1f/s mps=%.1f/s total=%d/%d var=%.5f conv=%@", + serverThreadIdx, + stats.batchCount, + stats.ratio, + stats.coreMLSamplesPerSec, + stats.mpsGraphSamplesPerSec, + 
stats.totalCoreMLSamples, + stats.totalMPSGraphSamples, + stats.ratioVariance, + converged ? "yes" : "no")) + } } } @@ -878,9 +999,15 @@ public func createHybridComputeHandle( printError("Core ML backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") + // Log if diagnostic mode is enabled + if diagnosticLoggingEnabled { + printError("Core ML backend \(serverThreadIdx): Diagnostic logging enabled (KATAGO_HYBRID_DIAG=1)") + } + return HybridComputeHandle( coremlHandle: coremlHandle, - mpsGraphHandle: mpsGraphHandle + mpsGraphHandle: mpsGraphHandle, + serverThreadIdx: serverThreadIdx ) } From 46eb2c5ae7b48c4e4f1b812ef546efb4ed37467d Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sat, 24 Jan 2026 08:27:10 +0800 Subject: [PATCH 18/34] Unify CoreML and Metal into single Metal backend Consolidate the previously separate CoreML and MPSGraph backends into a unified Metal backend. The hybrid architecture (CPU+GPU+ANE) is preserved but exposed as a single -DUSE_BACKEND=METAL build option. 
Key changes: - Remove standalone CoreML backend files (coremlbackend.{cpp,h,swift}) - Merge CoreML logic into metalbackend.{cpp,h,swift} - Rename mpsgraphlayers.swift to metallayers.swift - Rename CoreMLProcess namespace to MetalProcess - Update CMakeLists.txt: remove COREML backend, keep METAL only - Update CI workflow to use METAL backend - Prefer MPSGraph over CoreML for batch size 1 (better latency) - Add autoreleasepool to CoreML dispatch path - Remove unused alpha constant from ThroughputTracker - Fix minor comment and log prefix inconsistencies Co-Authored-By: Claude Opus 4.5 --- .github/workflows/build.yml | 10 +- Compiling.md | 23 +- cpp/CMakeLists.txt | 52 +- cpp/main.cpp | 4 - cpp/neuralnet/coremlbackend.cpp | 1120 ----- cpp/neuralnet/coremlbackend.h | 259 -- cpp/neuralnet/coremlbackend.swift | 1034 ----- cpp/neuralnet/metalbackend.cpp | 1304 +++--- cpp/neuralnet/metalbackend.h | 201 +- cpp/neuralnet/metalbackend.swift | 3782 ++++------------- ...mpsgraphlayers.swift => metallayers.swift} | 549 +++ cpp/program/setup.cpp | 3 - 12 files changed, 2152 insertions(+), 6189 deletions(-) delete mode 100644 cpp/neuralnet/coremlbackend.cpp delete mode 100644 cpp/neuralnet/coremlbackend.h delete mode 100644 cpp/neuralnet/coremlbackend.swift rename cpp/neuralnet/{mpsgraphlayers.swift => metallayers.swift} (81%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fa9fa4daf..97184fc43 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -97,7 +97,7 @@ jobs: name: katago-macos-opencl path: cpp/katago - build-macos-coreml: + build-macos-metal: runs-on: macos-latest permissions: contents: read @@ -121,14 +121,14 @@ jobs: cpp/build.ninja cpp/.ninja_deps cpp/.ninja_log - key: ${{ runner.os }}-cmake-coreml-${{ hashFiles('**/CMakeLists.txt') }} + key: ${{ runner.os }}-cmake-metal-${{ hashFiles('**/CMakeLists.txt') }} restore-keys: | - ${{ runner.os }}-cmake-coreml- + ${{ runner.os }}-cmake-metal- - name: Configure CMake 
working-directory: cpp run: | - cmake . -G Ninja -DUSE_BACKEND=COREML -DCMAKE_BUILD_TYPE=Release + cmake . -G Ninja -DUSE_BACKEND=METAL -DCMAKE_BUILD_TYPE=Release - name: Build working-directory: cpp @@ -144,7 +144,7 @@ jobs: if: github.event_name == 'push' && github.ref == 'refs/heads/master' uses: actions/upload-artifact@v4 with: - name: katago-macos-coreml + name: katago-macos-metal path: cpp/katago build-windows: diff --git a/Compiling.md b/Compiling.md index 80cd7df36..acc1b3cbd 100644 --- a/Compiling.md +++ b/Compiling.md @@ -118,16 +118,7 @@ As also mentioned in the instructions below but repeated here for visibility, if * If using OpenCL, you will want to verify that KataGo is picking up the correct device (e.g. some systems may have both an Intel CPU OpenCL and GPU OpenCL, if KataGo appears to pick the wrong one, you can correct this by specifying `openclGpuToUse` in `configs/gtp_example.cfg`). ## MacOS - * TLDR (Metal backend - recommended for most users): - ``` - git clone https://github.com/lightvector/KataGo.git - cd KataGo/cpp - # If you get missing library errors, install the appropriate packages using your system package manager and try again. - # -DBUILD_DISTRIBUTED=1 is only needed if you want to contribute back to public training. - cmake -G Ninja -DUSE_BACKEND=METAL -DBUILD_DISTRIBUTED=1 - ninja - ``` - * TLDR (CoreML backend - hybrid CPU+GPU+Neural Engine for maximum throughput): + * TLDR (Metal backend - recommended for most users, hybrid CPU+GPU+Neural Engine for maximum throughput): ``` # First, install the katagocoreml library via Homebrew brew tap chinchangyang/katagocoreml-cpp @@ -135,15 +126,17 @@ As also mentioned in the instructions below but repeated here for visibility, if git clone https://github.com/lightvector/KataGo.git cd KataGo/cpp - cmake -G Ninja -DUSE_BACKEND=COREML -DBUILD_DISTRIBUTED=1 + # If you get missing library errors, install the appropriate packages using your system package manager and try again. 
+ # -DBUILD_DISTRIBUTED=1 is only needed if you want to contribute back to public training. + cmake -G Ninja -DUSE_BACKEND=METAL -DBUILD_DISTRIBUTED=1 ninja ``` * Requirements * [Homebrew](https://brew.sh): `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"` * CMake with a minimum version of 3.18.2: `brew install cmake`. * AppleClang and Swift compilers: `xcode-select --install`. - * If using the Metal or CoreML backend, [Ninja](https://ninja-build.org): `brew install ninja` - * If using the CoreML backend, katagocoreml library: `brew tap chinchangyang/katagocoreml-cpp && brew install katagocoreml` + * If using the Metal backend, [Ninja](https://ninja-build.org): `brew install ninja` + * If using the Metal backend, katagocoreml library: `brew tap chinchangyang/katagocoreml-cpp && brew install katagocoreml` * libzip: `brew install libzip`. * If you want to do self-play training and research, probably Google perftools `brew install gperftools` for TCMalloc or some other better malloc implementation. For unknown reasons, the allocation pattern in self-play with large numbers of threads and parallel games causes a lot of memory fragmentation under glibc malloc that will eventually run your machine out of memory, but better mallocs handle it fine. * If compiling to contribute to public distributed training runs, OpenSSL is required (`brew install openssl`). @@ -151,14 +144,14 @@ As also mentioned in the instructions below but repeated here for visibility, if * `git clone https://github.com/lightvector/KataGo.git` * Compile using CMake and make in the cpp directory: * `cd KataGo/cpp` - * `cmake . -G Ninja -DUSE_BACKEND=METAL` or `cmake . -G Ninja -DUSE_BACKEND=COREML` or `cmake . -DUSE_BACKEND=OPENCL` or `cmake . -DUSE_BACKEND=EIGEN` depending on which backend you want. + * `cmake . -G Ninja -DUSE_BACKEND=METAL` or `cmake . -DUSE_BACKEND=OPENCL` or `cmake . -DUSE_BACKEND=EIGEN` depending on which backend you want. 
* Specify also `-DUSE_TCMALLOC=1` if using TCMalloc. * Compiling will also call git commands to embed the git hash into the compiled executable, specify also `-DNO_GIT_REVISION=1` to disable it if this is causing issues for you. * Specify `-DUSE_AVX2=1` to also compile Eigen with AVX2 and FMA support, which will make it incompatible with old CPUs but much faster. Intel-based Macs with new processors support AVX2, but Apple Silicon Macs do not support AVX2 natively. (If you want to go further, you can also add `-DCMAKE_CXX_FLAGS='-march=native'` which will specialize to precisely your machine's CPU, but the exe might not run on other machines at all). * Specify `-DBUILD_DISTRIBUTED=1` to compile with support for contributing data to public distributed training runs. * If building distributed, you will also need to build with Git revision support, including building within a clone of the repo, as opposed to merely an unzipped copy of its source. * Only builds from specific tagged versions or branches can contribute, in particular, instead of the `master` branch, use either the latest [release](https://github.com/lightvector/KataGo/releases) tag or the tip of the `stable` branch. To minimize the chance of any data incompatibilities or bugs, please do NOT attempt to contribute with custom changes or circumvent these limitations. - * `ninja` for Metal or CoreML backend, or `make` for other backends. + * `ninja` for Metal backend, or `make` for other backends. * Done! You should now have a compiled `katago` executable in your working directory. * Pre-trained neural nets are available at [the main training website](https://katagotraining.org/). * You will probably want to edit `configs/gtp_example.cfg` (see "Tuning for Performance" above). 
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index ab2b3dc69..4578732ae 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -32,7 +32,7 @@ endif() set(BUILD_DISTRIBUTED 0 CACHE BOOL "Build with http support for contributing to distributed training") set(USE_BACKEND CACHE STRING "Neural net backend") string(TOUPPER "${USE_BACKEND}" USE_BACKEND) -set_property(CACHE USE_BACKEND PROPERTY STRINGS "" CUDA TENSORRT OPENCL EIGEN METAL COREML) +set_property(CACHE USE_BACKEND PROPERTY STRINGS "" CUDA TENSORRT OPENCL EIGEN METAL) set(USE_TCMALLOC 0 CACHE BOOL "Use TCMalloc") set(NO_GIT_REVISION 0 CACHE BOOL "Disable embedding the git revision into the compiled exe") @@ -97,7 +97,7 @@ elseif(USE_BACKEND STREQUAL "TENSORRT") message(FATAL_ERROR "Combining USE_CACHE_TENSORRT_PLAN with BUILD_DISTRIBUTED is not supported - it would consume excessive disk space and might worsen performance every time models are updated. Use only one at a time in a given build of KataGo.") endif() elseif(USE_BACKEND STREQUAL "METAL") - message(STATUS "-DUSE_BACKEND=METAL, using Metal backend.") + message(STATUS "-DUSE_BACKEND=METAL, using Metal backend with hybrid MPSGraph + CoreML execution.") if(NOT "${CMAKE_GENERATOR}" STREQUAL "Ninja") message(FATAL_ERROR "Bidirectional C++ Interop requires Ninja generator. Have ${CMAKE_GENERATOR}") endif() @@ -107,6 +107,8 @@ elseif(USE_BACKEND STREQUAL "METAL") if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") message(FATAL_ERROR "Project requires building with AppleClang. 
Have ${CMAKE_CXX_COMPILER_ID}") endif() + find_package(PkgConfig REQUIRED) + pkg_check_modules(KATAGOCOREML REQUIRED katagocoreml) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/external/macos/cmake/modules") include(InitializeSwift) include(AddSwift) @@ -115,7 +117,8 @@ elseif(USE_BACKEND STREQUAL "METAL") neuralnet/metalbackend.cpp ) add_library(KataGoSwift STATIC - neuralnet/metalbackend.swift) + neuralnet/metalbackend.swift + neuralnet/metallayers.swift) _swift_generate_cxx_header( KataGoSwift "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoSwift/KataGoSwift-swift.h") @@ -123,37 +126,6 @@ elseif(USE_BACKEND STREQUAL "METAL") set_target_properties(KataGoSwift PROPERTIES Swift_MODULE_NAME "KataGoSwift") target_compile_options(KataGoSwift PUBLIC "$<$:-cxx-interoperability-mode=default>") -elseif(USE_BACKEND STREQUAL "COREML") - message(STATUS "-DUSE_BACKEND=COREML, using Core ML backend.") - if(NOT "${CMAKE_GENERATOR}" STREQUAL "Ninja") - message(FATAL_ERROR "Bidirectional C++ Interop requires Ninja generator. Have ${CMAKE_GENERATOR}") - endif() - if("${CMAKE_Swift_COMPILER_VERSION}" VERSION_LESS 5.9) - message(FATAL_ERROR "Bidirectional C++ Interop requires Swift 5.9 or greater. Have ${CMAKE_Swift_COMPILER_VERSION}") - endif() - if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") - message(FATAL_ERROR "Project requires building with AppleClang. 
Have ${CMAKE_CXX_COMPILER_ID}") - endif() - # Find katagocoreml library for native Core ML model conversion using pkg-config - find_package(PkgConfig REQUIRED) - pkg_check_modules(KATAGOCOREML REQUIRED katagocoreml) - list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/external/macos/cmake/modules") - include(InitializeSwift) - include(AddSwift) - set(CMAKE_OSX_DEPLOYMENT_TARGET 13.0) - set(NEURALNET_BACKEND_SOURCES - neuralnet/coremlbackend.cpp - ) - add_library(KataGoCoreML STATIC - neuralnet/coremlbackend.swift - neuralnet/mpsgraphlayers.swift) - _swift_generate_cxx_header( - KataGoCoreML - "${CMAKE_CURRENT_BINARY_DIR}/include/KataGoCoreML/KataGoCoreML-swift.h") - target_include_directories(KataGoCoreML PUBLIC "${CMAKE_CURRENT_BINARY_DIR}/include") - set_target_properties(KataGoCoreML PROPERTIES Swift_MODULE_NAME "KataGoCoreML") - target_compile_options(KataGoCoreML PUBLIC - "$<$:-cxx-interoperability-mode=default>") elseif(USE_BACKEND STREQUAL "OPENCL") message(STATUS "-DUSE_BACKEND=OPENCL, using OpenCL backend.") set(NEURALNET_BACKEND_SOURCES @@ -429,22 +401,14 @@ elseif(USE_BACKEND STREQUAL "TENSORRT") target_link_libraries(katago CUDA::cudart_static ${TENSORRT_LIBRARY}) elseif(USE_BACKEND STREQUAL "METAL") target_compile_definitions(katago PRIVATE USE_METAL_BACKEND) - target_link_libraries(katago KataGoSwift) - if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") - message(WARNING "You are currently running cmake on an Intel-based processor. It is known that running KataGo in this configuration may encounter performance issues. 
It is recommended to switch to a cmake version designed for ARM64 architecture for optimal performance.") - endif() -elseif(USE_BACKEND STREQUAL "COREML") - target_compile_definitions(katago PRIVATE USE_COREML_BACKEND) target_include_directories(katago PRIVATE ${KATAGOCOREML_INCLUDE_DIRS}) - # Link katagocoreml with full path and add all required library directories find_library(KATAGOCOREML_LIB katagocoreml HINTS /usr/local/lib REQUIRED) target_link_directories(katago PRIVATE ${KATAGOCOREML_LIBRARY_DIRS}) - # Link MetalPerformanceShadersGraph for hybrid MPSGraph + CoreML backend - target_link_libraries(katago KataGoCoreML ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS} + target_link_libraries(katago KataGoSwift ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS} "-framework MetalPerformanceShaders" "-framework MetalPerformanceShadersGraph") if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") - message(WARNING "You are currently running cmake on an Intel-based processor. Core ML backend may not work optimally. ARM64 architecture is recommended.") + message(WARNING "Metal backend may not work optimally on Intel. 
ARM64 architecture is recommended.") endif() elseif(USE_BACKEND STREQUAL "OPENCL") target_compile_definitions(katago PRIVATE USE_OPENCL_BACKEND) diff --git a/cpp/main.cpp b/cpp/main.cpp index c15703cd8..0fcc36dea 100644 --- a/cpp/main.cpp +++ b/cpp/main.cpp @@ -244,8 +244,6 @@ string Version::getKataGoVersionFullInfo() { out << "Using TensorRT backend" << endl; #elif defined(USE_METAL_BACKEND) out << "Using Metal backend" << endl; -#elif defined(USE_COREML_BACKEND) - out << "Using Core ML backend" << endl; #elif defined(USE_OPENCL_BACKEND) out << "Using OpenCL backend" << endl; #elif defined(USE_EIGEN_BACKEND) @@ -282,8 +280,6 @@ string Version::getGitRevisionWithBackend() { s += "-trt"; #elif defined(USE_METAL_BACKEND) s += "-metal"; -#elif defined(USE_COREML_BACKEND) - s += "-coreml"; #elif defined(USE_OPENCL_BACKEND) s += "-opencl"; #elif defined(USE_EIGEN_BACKEND) diff --git a/cpp/neuralnet/coremlbackend.cpp b/cpp/neuralnet/coremlbackend.cpp deleted file mode 100644 index 88208ad29..000000000 --- a/cpp/neuralnet/coremlbackend.cpp +++ /dev/null @@ -1,1120 +0,0 @@ -#ifdef USE_COREML_BACKEND - -#include "../neuralnet/modelversion.h" -#include "../neuralnet/nneval.h" -#include "../neuralnet/nninputs.h" -#include "../neuralnet/nninterface.h" -#include "../neuralnet/coremlbackend.h" - -#include -#include -#include -#include -#include -#include // For getpid() - -using namespace std; - -//------------------------------------------------------------------------------ -// CoreML Model Conversion - Native C++ using katagocoreml library -//------------------------------------------------------------------------------ - -namespace gfs = ghc::filesystem; - -// Minimum batch sizes for hybrid execution mode. -// Hybrid splits batches between CoreML (CPU+ANE) and MPSGraph (GPU). -// When batch is too small to split, prefer MPSGraph-only for stability: -// MPSGraph has more predictable latency and avoids CoreML dispatch overhead. 
-static constexpr int MIN_COREML_BATCH = 1; -static constexpr int MIN_MPSGRAPH_BATCH = 1; - -namespace CoreMLConversion { - -// Get temp directory for model conversion -static string getTempDirectory() { - gfs::path tempDir = gfs::temp_directory_path() / "katago_coreml"; - std::error_code ec; - gfs::create_directories(tempDir, ec); - if(ec) { - throw runtime_error("Failed to create temp directory: " + ec.message()); - } - return tempDir.string(); -} - -// Generate unique temporary path for model conversion -static string generateTempPath(int serverThreadIdx) { - auto now = chrono::steady_clock::now().time_since_epoch().count(); - return getTempDirectory() + "/model_" + to_string(getpid()) + "_" + - to_string(serverThreadIdx) + "_" + to_string(now) + ".mlpackage"; -} - -// CoreML model metadata constants -static const string COREML_MODEL_AUTHOR = "KataGo"; -static const string COREML_MODEL_LICENSE = "See original model file for license terms"; - -// Convert KataGo model to CoreML in temp directory, returns path to .mlpackage -// The caller (Swift side) is responsible for deleting the temp file after loading -static string convertModelToTemp( - const string& modelPath, - int boardX, - int boardY, - bool useFP16, - bool optimizeMask, - int maxBatchSize, - int serverThreadIdx -) { - // maxBatchSize is validated upstream: cfg.getInt("nnMaxBatchSize", 1, 65536) in setup.cpp - // and NNEvaluator constructor throws if maxBatchSize <= 0. Assert for defensive documentation. - assert(maxBatchSize >= 1); - - string tempPath = generateTempPath(serverThreadIdx); - cerr << "Core ML backend " << serverThreadIdx << ": Converting model to " << tempPath << endl; - - katagocoreml::ConversionOptions opts; - opts.board_x_size = boardX; - opts.board_y_size = boardY; - opts.compute_precision = useFP16 ? 
"FLOAT16" : "FLOAT32"; - opts.optimize_identity_mask = optimizeMask; - opts.min_batch_size = 1; - opts.max_batch_size = maxBatchSize; - opts.author = COREML_MODEL_AUTHOR; - opts.license = COREML_MODEL_LICENSE; - - try { - katagocoreml::KataGoConverter::convert(modelPath, tempPath, opts); - } catch(const exception& e) { - // Clean up partial conversion on failure - std::error_code ec; - gfs::remove_all(tempPath, ec); - if(ec) { - cerr << "Core ML backend " << serverThreadIdx << ": Warning: Failed to clean up partial conversion at " << tempPath << ": " << ec.message() << endl; - } - throw runtime_error(string("Core ML model conversion failed: ") + e.what()); - } - - cerr << "Core ML backend " << serverThreadIdx << ": Conversion completed" << endl; - return tempPath; -} - -} // namespace CoreMLConversion - -//------------------------------------------------------------------------------ -// Model Descriptor Conversion - C++ to Swift types for MPSGraph -//------------------------------------------------------------------------------ - -namespace CoreMLProcess { - -/// Converts a ConvLayerDesc instance from C++ to Swift -SWConvLayerDesc convLayerDescToSwift(const ConvLayerDesc* desc) { - return createSWConvLayerDesc( - desc->convYSize, - desc->convXSize, - desc->inChannels, - desc->outChannels, - desc->dilationY, - desc->dilationX, - (float*)desc->weights.data()); -} - -/// Converts a BatchNormLayerDesc instance from C++ to Swift -SWBatchNormLayerDesc batchNormLayerDescToSwift(const BatchNormLayerDesc* desc) { - return createSWBatchNormLayerDesc( - desc->numChannels, - (float*)desc->mergedScale.data(), - (float*)desc->mergedBias.data()); -} - -/// Convert an activation layer description from C++ to Swift -ActivationKind activationLayerDescToSwift(const ActivationLayerDesc* desc) { - switch(desc->activation) { - case ACTIVATION_RELU: - return ActivationKind::relu(); - case ACTIVATION_MISH: - return ActivationKind::mish(); - case ACTIVATION_MISH_SCALE8: - return 
ActivationKind::identity(); // Metal/CoreML does not use scaled mish - case ACTIVATION_IDENTITY: - return ActivationKind::identity(); - default: - return ActivationKind::identity(); - } -} - -/// Convert a matrix multiplication layer description from C++ to Swift -SWMatMulLayerDesc matMulLayerDescToSwift(const MatMulLayerDesc* desc) { - return createSWMatMulLayerDesc( - desc->inChannels, - desc->outChannels, - (float*)desc->weights.data()); -} - -/// Convert a matrix bias layer description from C++ to Swift -SWMatBiasLayerDesc matBiasLayerDescToSwift(const MatBiasLayerDesc* desc) { - return createSWMatBiasLayerDesc(desc->numChannels, (float*)desc->weights.data()); -} - -/// Convert a residual block description from C++ to Swift -SWResidualBlockDesc residualBlockDescToSwift(const ResidualBlockDesc* desc) { - SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); - ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); - SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); - SWBatchNormLayerDesc midBN = batchNormLayerDescToSwift(&desc->midBN); - ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); - SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); - - return createSWResidualBlockDesc( - preBN, - preActivationKind, - regularConv, - midBN, - midActivationKind, - finalConv); -} - -/// Convert a global pooling residual block description from C++ to Swift -SWGlobalPoolingResidualBlockDesc globalPoolingResidualBlockDescToSwift(const GlobalPoolingResidualBlockDesc* desc) { - SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); - ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); - SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); - SWConvLayerDesc gpoolConv = convLayerDescToSwift(&desc->gpoolConv); - SWBatchNormLayerDesc gpoolBN = batchNormLayerDescToSwift(&desc->gpoolBN); - ActivationKind 
gpoolActivationKind = activationLayerDescToSwift(&desc->gpoolActivation); - SWMatMulLayerDesc gpoolToBiasMul = matMulLayerDescToSwift(&desc->gpoolToBiasMul); - SWBatchNormLayerDesc midBN = batchNormLayerDescToSwift(&desc->midBN); - ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); - SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); - - return createSWGlobalPoolingResidualBlockDesc( - preBN, - preActivationKind, - regularConv, - gpoolConv, - gpoolBN, - gpoolActivationKind, - gpoolToBiasMul, - midBN, - midActivationKind, - finalConv); -} - -// Forward declaration for mutual recursion -swift::Array residualBlocksToSwift(const vector>& blocks); - -/// Convert a nested bottleneck residual block description from C++ to Swift -SWNestedBottleneckResidualBlockDesc nestedBottleneckResidualBlockDescToSwift(const NestedBottleneckResidualBlockDesc* desc) { - SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); - ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); - SWConvLayerDesc preConv = convLayerDescToSwift(&desc->preConv); - auto swBlocks = residualBlocksToSwift(desc->blocks); - SWBatchNormLayerDesc postBN = batchNormLayerDescToSwift(&desc->postBN); - ActivationKind postActivationKind = activationLayerDescToSwift(&desc->postActivation); - SWConvLayerDesc postConv = convLayerDescToSwift(&desc->postConv); - - return createSWNestedBottleneckResidualBlockDesc( - preBN, - preActivationKind, - preConv, - swBlocks, - postBN, - postActivationKind, - postConv); -} - -/// Convert residual blocks from C++ to Swift -swift::Array residualBlocksToSwift(const vector>& blocks) { - auto builder = createBlockDescriptorBuilder(); - - for(size_t i = 0; i < blocks.size(); i++) { - void* blockDesc = blocks[i].second.get(); - - if(blocks[i].first == GLOBAL_POOLING_BLOCK_KIND) { - BlockDescriptor descriptor = globalPoolingResidualBlockDescToSwift((GlobalPoolingResidualBlockDesc*)blockDesc); - 
builder.enque(descriptor); - } else if(blocks[i].first == NESTED_BOTTLENECK_BLOCK_KIND) { - BlockDescriptor descriptor = nestedBottleneckResidualBlockDescToSwift((NestedBottleneckResidualBlockDesc*)blockDesc); - builder.enque(descriptor); - } else { - BlockDescriptor descriptor = residualBlockDescToSwift((ResidualBlockDesc*)blockDesc); - builder.enque(descriptor); - } - } - - return builder.getBlockDescriptors(); -} - -/// Convert a SGF metadata encoder description from C++ to Swift -swift::Optional sGFMetadataEncoderDescToSwift(const SGFMetadataEncoderDesc* desc) { - SWMatMulLayerDesc mul1 = matMulLayerDescToSwift(&desc->mul1); - SWMatBiasLayerDesc bias1 = matBiasLayerDescToSwift(&desc->bias1); - ActivationKind act1 = activationLayerDescToSwift(&desc->act1); - SWMatMulLayerDesc mul2 = matMulLayerDescToSwift(&desc->mul2); - SWMatBiasLayerDesc bias2 = matBiasLayerDescToSwift(&desc->bias2); - ActivationKind act2 = activationLayerDescToSwift(&desc->act2); - SWMatMulLayerDesc mul3 = matMulLayerDescToSwift(&desc->mul3); - - return createSWSGFMetadataEncoderDesc( - desc->metaEncoderVersion, - desc->numInputMetaChannels, - mul1, - bias1, - act1, - mul2, - bias2, - act2, - mul3); -} - -/// Convert a trunk description from C++ to Swift -SWTrunkDesc trunkDescToSwift(const TrunkDesc* trunk) { - SWConvLayerDesc initialConv = convLayerDescToSwift(&trunk->initialConv); - SWMatMulLayerDesc initialMatMul = matMulLayerDescToSwift(&trunk->initialMatMul); - auto sgfMetadataEncoder = sGFMetadataEncoderDescToSwift(&trunk->sgfMetadataEncoder); - auto swBlocks = residualBlocksToSwift(trunk->blocks); - SWBatchNormLayerDesc trunkTipBN = batchNormLayerDescToSwift(&trunk->trunkTipBN); - ActivationKind trunkTipActivation = activationLayerDescToSwift(&trunk->trunkTipActivation); - - return createSWTrunkDesc( - trunk->modelVersion, - trunk->trunkNumChannels, - trunk->midNumChannels, - trunk->regularNumChannels, - trunk->gpoolNumChannels, - initialConv, - initialMatMul, - sgfMetadataEncoder, - 
swBlocks, - trunkTipBN, - trunkTipActivation); -} - -/// Convert a policy head description from C++ to Swift -SWPolicyHeadDesc policyHeadDescToSwift(const PolicyHeadDesc* policyHead) { - SWConvLayerDesc p1Conv = convLayerDescToSwift(&policyHead->p1Conv); - SWConvLayerDesc g1Conv = convLayerDescToSwift(&policyHead->g1Conv); - SWBatchNormLayerDesc g1BN = batchNormLayerDescToSwift(&policyHead->g1BN); - ActivationKind g1Activation = activationLayerDescToSwift(&policyHead->g1Activation); - SWMatMulLayerDesc gpoolToBiasMul = matMulLayerDescToSwift(&policyHead->gpoolToBiasMul); - SWBatchNormLayerDesc p1BN = batchNormLayerDescToSwift(&policyHead->p1BN); - ActivationKind p1Activation = activationLayerDescToSwift(&policyHead->p1Activation); - SWConvLayerDesc p2Conv = convLayerDescToSwift(&policyHead->p2Conv); - SWMatMulLayerDesc gpoolToPassMul = matMulLayerDescToSwift(&policyHead->gpoolToPassMul); - SWMatBiasLayerDesc gpoolToPassBias = matBiasLayerDescToSwift(&policyHead->gpoolToPassBias); - ActivationKind passActivation = activationLayerDescToSwift(&policyHead->passActivation); - SWMatMulLayerDesc gpoolToPassMul2 = matMulLayerDescToSwift(&policyHead->gpoolToPassMul2); - - return createSWPolicyHeadDesc( - policyHead->modelVersion, - p1Conv, - g1Conv, - g1BN, - g1Activation, - gpoolToBiasMul, - p1BN, - p1Activation, - p2Conv, - gpoolToPassMul, - gpoolToPassBias, - passActivation, - gpoolToPassMul2); -} - -/// Convert a value head description from C++ to Swift -SWValueHeadDesc valueHeadDescToSwift(const ValueHeadDesc* valueHead) { - SWConvLayerDesc v1Conv = convLayerDescToSwift(&valueHead->v1Conv); - SWBatchNormLayerDesc v1BN = batchNormLayerDescToSwift(&valueHead->v1BN); - ActivationKind v1Activation = activationLayerDescToSwift(&valueHead->v1Activation); - SWMatMulLayerDesc v2Mul = matMulLayerDescToSwift(&valueHead->v2Mul); - SWMatBiasLayerDesc v2Bias = matBiasLayerDescToSwift(&valueHead->v2Bias); - ActivationKind v2Activation = 
activationLayerDescToSwift(&valueHead->v2Activation); - SWMatMulLayerDesc v3Mul = matMulLayerDescToSwift(&valueHead->v3Mul); - SWMatBiasLayerDesc v3Bias = matBiasLayerDescToSwift(&valueHead->v3Bias); - SWMatMulLayerDesc sv3Mul = matMulLayerDescToSwift(&valueHead->sv3Mul); - SWMatBiasLayerDesc sv3Bias = matBiasLayerDescToSwift(&valueHead->sv3Bias); - SWConvLayerDesc vOwnershipConv = convLayerDescToSwift(&valueHead->vOwnershipConv); - - return createSWValueHeadDesc( - valueHead->modelVersion, - v1Conv, - v1BN, - v1Activation, - v2Mul, - v2Bias, - v2Activation, - v3Mul, - v3Bias, - sv3Mul, - sv3Bias, - vOwnershipConv); -} - -/// Convert a model description from C++ to Swift -SWModelDesc modelDescToSwift(const ModelDesc* modelDesc) { - return createSWModelDesc( - modelDesc->modelVersion, - swift::String(modelDesc->name), - modelDesc->numInputChannels, - modelDesc->numInputGlobalChannels, - modelDesc->numInputMetaChannels, - modelDesc->numValueChannels, - modelDesc->numScoreValueChannels, - modelDesc->numOwnershipChannels, - modelDesc->numPolicyChannels, - trunkDescToSwift(&modelDesc->trunk), - policyHeadDescToSwift(&modelDesc->policyHead), - valueHeadDescToSwift(&modelDesc->valueHead)); -} - -} // namespace CoreMLProcess - -//------------------------------------------------------------------------------ -// LoadedModel implementation -//------------------------------------------------------------------------------ - -LoadedModel::LoadedModel(const string& fileName, const string& expectedSha256) { - modelPath = fileName; - ModelDesc::loadFromFileMaybeGZipped(fileName, modelDesc, expectedSha256); -} - -//------------------------------------------------------------------------------ -// NeuralNet namespace - Global functions -//------------------------------------------------------------------------------ - -void NeuralNet::globalInitialize() { - // No global initialization needed for Core ML -} - -void NeuralNet::globalCleanup() { - // No cleanup needed - temp files are 
deleted immediately after loading -} - -LoadedModel* NeuralNet::loadModelFile(const string& file, const string& expectedSha256) { - LoadedModel* loadedModel = new LoadedModel(file, expectedSha256); - return loadedModel; -} - -void NeuralNet::freeLoadedModel(LoadedModel* loadedModel) { - delete loadedModel; -} - -const ModelDesc& NeuralNet::getModelDesc(const LoadedModel* loadedModel) { - return loadedModel->modelDesc; -} - -//------------------------------------------------------------------------------ -// ComputeContext implementation -//------------------------------------------------------------------------------ - -ComputeContext::ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode): -coremlContext(createCoreMLComputeContext(nnX, nnY, useFP16Mode != enabled_t::False)) { - this->useFP16Mode = useFP16Mode; - this->nnXLen = nnX; - this->nnYLen = nnY; - (void)useNHWCMode; -} - -ComputeContext::~ComputeContext() { -} - -ComputeContext* NeuralNet::createComputeContext( - const vector& gpuIdxs, - Logger* logger, - int nnXLen, - int nnYLen, - const string& openCLTunerFile, - const string& homeDataDirOverride, - bool openCLReTunePerBoardSize, - enabled_t useFP16Mode, - enabled_t useNHWCMode, - const LoadedModel* loadedModel) { - - (void)gpuIdxs; - (void)logger; - (void)openCLTunerFile; - (void)homeDataDirOverride; - (void)openCLReTunePerBoardSize; - (void)loadedModel; - - return new ComputeContext(nnXLen, nnYLen, useFP16Mode, useNHWCMode); -} - -void NeuralNet::freeComputeContext(ComputeContext* computeContext) { - delete computeContext; -} - -//------------------------------------------------------------------------------ -// ComputeHandle implementation -//------------------------------------------------------------------------------ - -static mutex computeHandleMutex; - -// Helper function to convert model and create hybrid compute handle -// This is needed because Swift Optional doesn't support assignment in C++ -static swift::Optional 
convertAndCreateHybridHandle( - ComputeContext* context, - const LoadedModel* loadedModel, - bool requireExactNNLen, - int maxBatchSize, - int serverThreadIdx -) { - auto coremlContext = context->coremlContext; - int nnXLen = coremlContext.getNnXLen(); - int nnYLen = coremlContext.getNnYLen(); - bool useFP16 = (context->useFP16Mode != enabled_t::False); - bool optimizeMask = requireExactNNLen; - - // Convert model to CoreML format in temp directory - // The Swift side will delete the temp file after loading - string coremlModelPath = CoreMLConversion::convertModelToTemp( - loadedModel->modelPath, - nnXLen, - nnYLen, - useFP16, - optimizeMask, - maxBatchSize, - serverThreadIdx - ); - - // Convert model descriptor to Swift format for MPSGraph path - SWModelDesc swModelDesc = CoreMLProcess::modelDescToSwift(&loadedModel->modelDesc); - - // Create hybrid compute handle (CoreML on CPU+ANE, MPSGraph on GPU) - return createHybridComputeHandle( - swift::String(coremlModelPath), - swModelDesc, - serverThreadIdx, - requireExactNNLen, - loadedModel->modelDesc.numInputChannels, - loadedModel->modelDesc.numInputGlobalChannels, - loadedModel->modelDesc.numInputMetaChannels, - loadedModel->modelDesc.numPolicyChannels, - loadedModel->modelDesc.numValueChannels, - loadedModel->modelDesc.numScoreValueChannels, - loadedModel->modelDesc.numOwnershipChannels, - coremlContext - ); -} - -// Helper function to create hybrid handle if FP16 mode with sufficient batch size, otherwise returns none -static swift::Optional createHybridHandleIfNeeded( - ComputeContext* context, - const LoadedModel* loadedModel, - bool requireExactNNLen, - int maxBatchSize, - int serverThreadIdx -) { - if(context->useFP16Mode == enabled_t::False) { - // FP32 mode - don't create hybrid handle - return swift::Optional::none(); - } - - // Hybrid mode splits batches: CoreML takes max(1, ...), MPSGraph takes remainder - // Minimum samples for meaningful split = 1 (CoreML) + 1 (MPSGraph) = 2 - // If batch can't be 
split, prefer MPSGraph-only for stability - if(maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH) { - return swift::Optional::none(); - } - - // FP16 mode with sufficient batch size: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) - return convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); -} - -// Helper function to create MPSGraph-only handle when needed -// Used when: (1) useFP16=false to avoid slow FP32 CoreML, or (2) batch too small for hybrid split -static swift::Optional createMPSGraphHandleIfNeeded( - ComputeContext* context, - const LoadedModel* loadedModel, - bool requireExactNNLen, - int maxBatchSize, - int serverThreadIdx -) { - // Use MPSGraph-only when: - // 1. FP32 mode (CoreML FP32 on CPU+ANE is slow), OR - // 2. Batch too small to split (hybrid requires minCoreML + minMPSGraph samples) - bool batchTooSmallForHybrid = maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH; - - if(context->useFP16Mode != enabled_t::False && !batchTooSmallForHybrid) { - // FP16 mode with sufficient batch - hybrid handle will be created instead - return swift::Optional::none(); - } - - // Log reason for MPSGraph-only mode - if(batchTooSmallForHybrid) { - cerr << "Core ML backend " << serverThreadIdx << ": Batch size " << maxBatchSize - << " too small for hybrid split - using MPSGraph GPU-only" << endl; - } else { - cerr << "Core ML backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; - } - - // Convert model descriptor to Swift format for MPSGraph path - // Note: No CoreML conversion needed - MPSGraph reads weights directly - SWModelDesc swModelDesc = CoreMLProcess::modelDescToSwift(&loadedModel->modelDesc); - - // Create MPSGraph-only handle (GPU only) - return createMPSGraphOnlyHandle( - swModelDesc, - serverThreadIdx, - requireExactNNLen, - context->coremlContext - ); -} - -ComputeHandle::ComputeHandle( - ComputeContext* context, - const LoadedModel* 
loadedModel, - bool inputsUseNHWC, - int gpuIdx, - int serverThreadIdx, - bool requireExactNNLen, - int maxBatchSize): -hybridHandle(createHybridHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)), -mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { - assert(((hybridHandle && !mpsGraphOnlyHandle) || (!hybridHandle && mpsGraphOnlyHandle)) && - "Exactly one of hybridHandle or mpsGraphOnlyHandle must be valid"); - - const ModelDesc* modelDesc = &loadedModel->modelDesc; - auto coremlContext = context->coremlContext; - - nnXLen = coremlContext.getNnXLen(); - nnYLen = coremlContext.getNnYLen(); - gpuIndex = gpuIdx; - version = modelDesc->modelVersion; - metaEncoderVersion = modelDesc->metaEncoderVersion; - this->inputsUseNHWC = inputsUseNHWC; - this->requireExactNNLen = requireExactNNLen; - useFP16 = (context->useFP16Mode != enabled_t::False); -} - -ComputeHandle::~ComputeHandle() { -} - -ComputeHandle* NeuralNet::createComputeHandle( - ComputeContext* context, - const LoadedModel* loadedModel, - Logger* logger, - int maxBatchSize, - bool requireExactNNLen, - bool inputsUseNHWC, - int gpuIdxForThisThread, - int serverThreadIdx) { - - (void)logger; - - int gpuIdx = (gpuIdxForThisThread == -1) ? 
0 : gpuIdxForThisThread; - ComputeHandle* handle = nullptr; - - { - lock_guard lock(computeHandleMutex); - handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx, requireExactNNLen, maxBatchSize); - } - - return handle; -} - -void NeuralNet::freeComputeHandle(ComputeHandle* handle) { - delete handle; -} - -bool NeuralNet::isUsingFP16(const ComputeHandle* handle) { - return handle->useFP16; -} - -//------------------------------------------------------------------------------ -// Device information -//------------------------------------------------------------------------------ - -void NeuralNet::printDevices() { - printCoreMLDevices(); -} - -//------------------------------------------------------------------------------ -// InputBuffers implementation -//------------------------------------------------------------------------------ - -InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int nnXLen, int nnYLen) { - const ModelDesc& m = loadedModel->modelDesc; - - maxBatchSize = maxBatchSz; - policyResultChannels = m.policyHead.p2Conv.outChannels; - - assert(((m.modelVersion < 16) || (policyResultChannels == 4)) && - ((m.modelVersion >= 16) || (m.modelVersion < 12) || (policyResultChannels == 2)) && - ((m.modelVersion >= 12) || (policyResultChannels == 1))); - - singleSpatialElts = (size_t)m.numInputChannels * nnXLen * nnYLen; - singleInputElts = (size_t)m.numInputChannels * nnXLen * nnYLen; - singleInputGlobalElts = (size_t)m.numInputGlobalChannels; - singleInputMetaElts = (size_t)m.numInputMetaChannels; - singlePolicyResultElts = (size_t)(nnXLen * nnYLen); - singlePolicyPassResultElts = 1; - singlePolicyProbsElts = (size_t)((nnXLen * nnYLen) + 1); - singleValueResultElts = (size_t)m.numValueChannels; - singleOwnershipResultElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; - singleOwnerMapElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; - singleScoreValuesResultElts = 
(size_t)m.numScoreValueChannels; - singleMaskElts = (size_t)nnXLen * nnYLen; - - assert(NNModelVersion::getNumSpatialFeatures(m.modelVersion) == m.numInputChannels); - assert(NNModelVersion::getNumGlobalFeatures(m.modelVersion) == m.numInputGlobalChannels); - assert(singleValueResultElts == 3); - - rowSpatialBufferElts = (size_t)maxBatchSz * singleSpatialElts; - userInputBufferElts = (size_t)maxBatchSize * singleInputElts; - userInputGlobalBufferElts = (size_t)maxBatchSize * singleInputGlobalElts; - userInputMetaBufferElts = (size_t)maxBatchSize * singleInputMetaElts; - policyResultBufferElts = (size_t)maxBatchSize * singlePolicyResultElts * policyResultChannels; - policyPassResultBufferElts = (size_t)maxBatchSize * singlePolicyPassResultElts * policyResultChannels; - policyProbsBufferElts = (size_t)maxBatchSize * singlePolicyProbsElts * policyResultChannels; - valueResultBufferElts = (size_t)maxBatchSize * singleValueResultElts; - ownershipResultBufferElts = (size_t)maxBatchSize * singleOwnershipResultElts; - ownerMapBufferElts = (size_t)maxBatchSz * singleOwnerMapElts; - scoreValuesResultBufferElts = (size_t)maxBatchSize * singleScoreValuesResultElts; - userInputMaskBufferElts = (size_t)maxBatchSize * singleMaskElts; - - rowSpatialBuffer = new float[rowSpatialBufferElts]; - userInputBuffer = new float[userInputBufferElts]; - memset(&userInputBuffer[0], 0, userInputBufferElts * sizeof(userInputBuffer[0])); - - userInputGlobalBuffer = new float[userInputGlobalBufferElts]; - userInputMetaBuffer = new float[userInputMetaBufferElts]; - policyResults = new float[policyResultBufferElts]; - policyPassResults = new float[policyPassResultBufferElts]; - policyProbsBuffer = new float[policyProbsBufferElts]; - valueResults = new float[valueResultBufferElts]; - ownershipResults = new float[ownershipResultBufferElts]; - ownerMapBuffer = new float[ownerMapBufferElts]; - scoreValuesResults = new float[scoreValuesResultBufferElts]; - userInputMaskBuffer = new 
float[userInputMaskBufferElts]; - memset(&userInputMaskBuffer[0], 0, userInputMaskBufferElts * sizeof(userInputMaskBuffer[0])); -} - -InputBuffers::~InputBuffers() { - delete[] rowSpatialBuffer; - delete[] userInputBuffer; - delete[] userInputGlobalBuffer; - delete[] userInputMetaBuffer; - delete[] policyResults; - delete[] policyPassResults; - delete[] policyProbsBuffer; - delete[] valueResults; - delete[] ownershipResults; - delete[] ownerMapBuffer; - delete[] scoreValuesResults; - delete[] userInputMaskBuffer; -} - -InputBuffers* NeuralNet::createInputBuffers(const LoadedModel* loadedModel, int maxBatchSize, int nnXLen, int nnYLen) { - return new InputBuffers(loadedModel, maxBatchSize, nnXLen, nnYLen); -} - -void NeuralNet::freeInputBuffers(InputBuffers* inputBuffers) { - delete inputBuffers; -} - -//------------------------------------------------------------------------------ -// CoreMLProcess namespace - Helper functions -//------------------------------------------------------------------------------ - -void CoreMLProcess::copyRowData(float* dest, const float* src, size_t numElements) { - copy(src, src + numElements, dest); -} - -void CoreMLProcess::convertNCHW( - float* rowSpatialInput, - const int C, - const int H, - const int W, - const bool inputsUseNHWC) { - - if ((!inputsUseNHWC) || (C <= 0) || (H <= 0) || (W <= 0)) { - return; - } - - const int totalSize = H * W * C; - - if (totalSize <= 1) - return; - - const int HW = H * W; - - auto get_nchw_target_index = [C, W, HW](int nhwc_index) -> int { - int c = nhwc_index % C; - int temp = nhwc_index / C; - int x = temp % W; - int y = temp / W; - return (c * HW) + (y * W) + x; - }; - - std::vector processed(totalSize, false); - - for (int i = 0; i < totalSize; ++i) { - if (processed[i]) - continue; - - int target_i = get_nchw_target_index(i); - - if (target_i == i) { - processed[i] = true; - continue; - } - - int current_idx = i; - float value_in_hand = rowSpatialInput[i]; - - while (true) { - int target_idx 
= get_nchw_target_index(current_idx); - float value_at_target = rowSpatialInput[target_idx]; - rowSpatialInput[target_idx] = value_in_hand; - processed[target_idx] = true; - value_in_hand = value_at_target; - current_idx = target_idx; - - if (current_idx == i) - break; - } - } -} - -void CoreMLProcess::processRowData(size_t row, ComputeHandle* gpuHandle, InputBuffers* inputBuffers, NNResultBuf** inputBufs) { - int nnXLen = gpuHandle->nnXLen; - int nnYLen = gpuHandle->nnYLen; - int numSpatialFeatures = NNModelVersion::getNumSpatialFeatures(gpuHandle->version); - - float* rowSpatialInput = &inputBuffers->userInputBuffer[inputBuffers->singleSpatialElts * row]; - float* rowGlobalInput = &inputBuffers->userInputGlobalBuffer[inputBuffers->singleInputGlobalElts * row]; - float* rowMetaInput = &inputBuffers->userInputMetaBuffer[inputBuffers->singleInputMetaElts * row]; - const float* rowGlobal = inputBufs[row]->rowGlobalBuf.data(); - const float* rowSpatial = inputBufs[row]->rowSpatialBuf.data(); - const float* rowMeta = inputBufs[row]->rowMetaBuf.data(); - - CoreMLProcess::copyRowData(rowGlobalInput, rowGlobal, inputBuffers->singleInputGlobalElts); - CoreMLProcess::copyRowData(rowMetaInput, rowMeta, inputBuffers->singleInputMetaElts); - - SymmetryHelpers::copyInputsWithSymmetry( - rowSpatial, - rowSpatialInput, - 1, - nnYLen, - nnXLen, - numSpatialFeatures, - gpuHandle->inputsUseNHWC, - inputBufs[row]->symmetry); - - CoreMLProcess::convertNCHW( - rowSpatialInput, - numSpatialFeatures, - nnYLen, - nnXLen, - gpuHandle->inputsUseNHWC); - - // Copy first channel of spatial input (mask) to dedicated mask buffer - // After NCHW conversion, the first nnXLen*nnYLen elements are the mask channel - float* rowMaskInput = &inputBuffers->userInputMaskBuffer[inputBuffers->singleMaskElts * row]; - copy(rowSpatialInput, rowSpatialInput + inputBuffers->singleMaskElts, rowMaskInput); -} - -float CoreMLProcess::policyOptimismCalc(const double policyOptimism, const float p, const float pOpt) 
{ - return p + ((pOpt - p) * policyOptimism); -} - -void CoreMLProcess::processOptimism( - InputBuffers* inputBuffers, - NNOutput* currentOutput, - const double policyOptimism, - size_t row) { - auto& buffers = *inputBuffers; - const auto singlePolicyResultElts = buffers.singlePolicyResultElts; - float* targetBuffer = &buffers.policyProbsBuffer[row * singlePolicyResultElts]; - float* policyOutputBuf = &buffers.policyResults[row * singlePolicyResultElts * buffers.policyResultChannels]; - - for(size_t i = 0; i < singlePolicyResultElts; ++i) { - const float p = policyOutputBuf[i]; - const float pOpt = policyOutputBuf[i + singlePolicyResultElts]; - targetBuffer[i] = CoreMLProcess::policyOptimismCalc(policyOptimism, p, pOpt); - } - - const auto p = buffers.policyPassResults[row * buffers.policyResultChannels]; - const auto pOpt = buffers.policyPassResults[row * buffers.policyResultChannels + 1]; - currentOutput->policyProbs[buffers.singlePolicyProbsElts - 1] = CoreMLProcess::policyOptimismCalc(policyOptimism, p, pOpt); -} - -void CoreMLProcess::processPolicy( - InputBuffers* inputBuffers, - NNOutput* currentOutput, - const ComputeHandle* gpuHandle, - NNResultBuf* inputBuf, - size_t row) { - auto& buffers = *inputBuffers; - float* targetBuffer = &buffers.policyResults[row * buffers.singlePolicyResultElts * buffers.policyResultChannels]; - const auto policyOptimism = inputBuf->policyOptimism; - - if(buffers.policyResultChannels == 1) { - currentOutput->policyProbs[buffers.singlePolicyProbsElts - 1] = - buffers.policyPassResults[row * buffers.policyResultChannels]; - } else { - CoreMLProcess::processOptimism(inputBuffers, currentOutput, policyOptimism, row); - targetBuffer = &buffers.policyProbsBuffer[row * buffers.singlePolicyResultElts]; - } - - SymmetryHelpers::copyOutputsWithSymmetry( - targetBuffer, currentOutput->policyProbs, 1, gpuHandle->nnYLen, gpuHandle->nnXLen, inputBuf->symmetry); -} - -void CoreMLProcess::processValue( - const InputBuffers* inputBuffers, - 
NNOutput* currentOutput, - const size_t row) { - const size_t singleValueResultElts = inputBuffers->singleValueResultElts; - assert(singleValueResultElts == 3); - const float* valueOutputBuf = &inputBuffers->valueResults[row * singleValueResultElts]; - currentOutput->whiteWinProb = valueOutputBuf[0]; - currentOutput->whiteLossProb = valueOutputBuf[1]; - currentOutput->whiteNoResultProb = valueOutputBuf[2]; -} - -void CoreMLProcess::processOwnership( - const InputBuffers* inputBuffers, - NNOutput* currentOutput, - const ComputeHandle* gpuHandle, - const int symmetry, - const size_t row) { - const int nnXLen = gpuHandle->nnXLen; - const int nnYLen = gpuHandle->nnYLen; - const size_t singleOwnershipResultElts = inputBuffers->singleOwnershipResultElts; - const size_t ownershipOutputBufOffset = row * singleOwnershipResultElts; - - if(currentOutput->whiteOwnerMap != nullptr) { - const float* ownershipOutputBuf = &inputBuffers->ownershipResults[ownershipOutputBufOffset]; - SymmetryHelpers::copyOutputsWithSymmetry( - ownershipOutputBuf, currentOutput->whiteOwnerMap, 1, nnYLen, nnXLen, symmetry); - } -} - -void CoreMLProcess::processScoreValues( - const InputBuffers* inputBuffers, - NNOutput* currentOutput, - const int modelVersion, - const size_t row) { - const size_t offset = row * inputBuffers->singleScoreValuesResultElts; - const float* currentScoreValueData = &inputBuffers->scoreValuesResults[offset]; - - if(modelVersion >= 9) { - size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; - assert(numScoreValueChannels == 6); - currentOutput->whiteScoreMean = currentScoreValueData[0]; - currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; - currentOutput->whiteLead = currentScoreValueData[2]; - currentOutput->varTimeLeft = currentScoreValueData[3]; - currentOutput->shorttermWinlossError = currentScoreValueData[4]; - currentOutput->shorttermScoreError = currentScoreValueData[5]; - } - else if(modelVersion >= 8) { - size_t numScoreValueChannels = 
inputBuffers->singleScoreValuesResultElts; - assert(numScoreValueChannels == 4); - currentOutput->whiteScoreMean = currentScoreValueData[0]; - currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; - currentOutput->whiteLead = currentScoreValueData[2]; - currentOutput->varTimeLeft = currentScoreValueData[3]; - currentOutput->shorttermWinlossError = 0; - currentOutput->shorttermScoreError = 0; - } - else if(modelVersion >= 4) { - size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; - assert(numScoreValueChannels == 2); - currentOutput->whiteScoreMean = currentScoreValueData[0]; - currentOutput->whiteScoreMeanSq = currentScoreValueData[1]; - currentOutput->whiteLead = currentOutput->whiteScoreMean; - currentOutput->varTimeLeft = 0; - currentOutput->shorttermWinlossError = 0; - currentOutput->shorttermScoreError = 0; - } - else { - assert(modelVersion >= 3); - size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; - assert(numScoreValueChannels == 1); - currentOutput->whiteScoreMean = currentScoreValueData[0]; - currentOutput->whiteScoreMeanSq = currentOutput->whiteScoreMean * currentOutput->whiteScoreMean; - currentOutput->whiteLead = currentOutput->whiteScoreMean; - currentOutput->varTimeLeft = 0; - currentOutput->shorttermWinlossError = 0; - currentOutput->shorttermScoreError = 0; - } -} - -void CoreMLProcess::processRow( - size_t row, - const ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - NNResultBuf** inputBufs, - vector& outputs) { - NNOutput* currentOutput = outputs[row]; - assert(currentOutput->nnXLen == gpuHandle->nnXLen); - assert(currentOutput->nnYLen == gpuHandle->nnYLen); - CoreMLProcess::processPolicy(inputBuffers, currentOutput, gpuHandle, inputBufs[row], row); - CoreMLProcess::processValue(inputBuffers, currentOutput, row); - CoreMLProcess::processOwnership(inputBuffers, currentOutput, gpuHandle, inputBufs[row]->symmetry, row); - CoreMLProcess::processScoreValues(inputBuffers, currentOutput, 
gpuHandle->version, row); -} - -void CoreMLProcess::getCoreMLOutput( - ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - int numBatchEltsFilled, - NNResultBuf** inputBufs, - vector& outputs) { - assert(numBatchEltsFilled > 0); - - int batchSize = numBatchEltsFilled; - - assert(batchSize <= inputBuffers->maxBatchSize); - assert((NNModelVersion::getNumSpatialFeatures(gpuHandle->version) * gpuHandle->nnXLen * gpuHandle->nnYLen) <= (int)inputBuffers->singleInputElts); - assert(NNModelVersion::getNumGlobalFeatures(gpuHandle->version) == (int)inputBuffers->singleInputGlobalElts); - - if(gpuHandle->metaEncoderVersion > 0) { - assert(SGFMetadata::METADATA_INPUT_NUM_CHANNELS == (int)inputBuffers->singleInputMetaElts); - } - - assert(inputBuffers->singleValueResultElts == 3); - - for(int row = 0; row < batchSize; row++) { - CoreMLProcess::processRowData(row, gpuHandle, inputBuffers, inputBufs); - } - - // Dispatch to appropriate handle based on mode - if(gpuHandle->hybridHandle) { - // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) - // Mask buffer has correct stride (singleMaskElts = H*W per batch element) - // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) - gpuHandle->hybridHandle.get().apply( - inputBuffers->userInputBuffer, - inputBuffers->userInputGlobalBuffer, - inputBuffers->userInputMetaBuffer, - inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride - inputBuffers->policyResults, - inputBuffers->policyPassResults, - inputBuffers->valueResults, - inputBuffers->scoreValuesResults, - inputBuffers->ownershipResults, - batchSize); - } else if(gpuHandle->mpsGraphOnlyHandle) { - // FP32 mode: Use MPSGraph only (GPU-only) - // Note: MPSGraphModelHandle.apply() doesn't take maskInput - it extracts from spatial input - gpuHandle->mpsGraphOnlyHandle.get().apply( - inputBuffers->userInputBuffer, - inputBuffers->userInputGlobalBuffer, - inputBuffers->userInputMetaBuffer, - 
inputBuffers->policyResults, - inputBuffers->policyPassResults, - inputBuffers->valueResults, - inputBuffers->scoreValuesResults, - inputBuffers->ownershipResults, - batchSize); - } else { - throw runtime_error("Core ML backend: No valid compute handle available"); - } - - for(int row = 0; row < batchSize; row++) { - CoreMLProcess::processRow(row, gpuHandle, inputBuffers, inputBufs, outputs); - } -} - -void NeuralNet::getOutput( - ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - int numBatchEltsFilled, - NNResultBuf** inputBufs, - vector& outputs) { - - CoreMLProcess::getCoreMLOutput(gpuHandle, inputBuffers, numBatchEltsFilled, inputBufs, outputs); -} - -//------------------------------------------------------------------------------ -// Test functions - not supported for Core ML backend -//------------------------------------------------------------------------------ - -bool NeuralNet::testEvaluateConv( - const ConvLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - bool useFP16, - bool useNHWC, - const vector& inputBuffer, - vector& outputBuffer) { - (void)desc; - (void)batchSize; - (void)nnXLen; - (void)nnYLen; - (void)useFP16; - (void)useNHWC; - (void)inputBuffer; - (void)outputBuffer; - return false; -} - -bool NeuralNet::testEvaluateBatchNorm( - const BatchNormLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - bool useFP16, - bool useNHWC, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - (void)desc; - (void)batchSize; - (void)nnXLen; - (void)nnYLen; - (void)useFP16; - (void)useNHWC; - (void)inputBuffer; - (void)maskBuffer; - (void)outputBuffer; - return false; -} - -bool NeuralNet::testEvaluateResidualBlock( - const ResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - bool useFP16, - bool useNHWC, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - (void)desc; - (void)batchSize; - (void)nnXLen; - (void)nnYLen; - (void)useFP16; - 
(void)useNHWC; - (void)inputBuffer; - (void)maskBuffer; - (void)outputBuffer; - return false; -} - -bool NeuralNet::testEvaluateGlobalPoolingResidualBlock( - const GlobalPoolingResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - bool useFP16, - bool useNHWC, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - (void)desc; - (void)batchSize; - (void)nnXLen; - (void)nnYLen; - (void)useFP16; - (void)useNHWC; - (void)inputBuffer; - (void)maskBuffer; - (void)outputBuffer; - return false; -} - -#endif // USE_COREML_BACKEND diff --git a/cpp/neuralnet/coremlbackend.h b/cpp/neuralnet/coremlbackend.h deleted file mode 100644 index 301482440..000000000 --- a/cpp/neuralnet/coremlbackend.h +++ /dev/null @@ -1,259 +0,0 @@ -#pragma once - -#ifdef USE_COREML_BACKEND - -#include -#include "desc.h" -#include "../core/commontypes.h" -#include "../neuralnet/modelversion.h" -#include "../neuralnet/nneval.h" -#include "../neuralnet/nninputs.h" -#include "../neuralnet/nninterface.h" -#include - -using namespace std; -using namespace KataGoCoreML; - -namespace CoreMLProcess { - -void copyRowData(float* dest, const float* src, size_t numElements); -void convertNCHW(float* rowSpatialInput, int C, int H, int W, bool inputsUseNHWC); -void processRowData(size_t row, ComputeHandle* gpuHandle, InputBuffers* inputBuffers, NNResultBuf** inputBufs); -float policyOptimismCalc(const double policyOptimism, const float p, const float pOpt); -void processOptimism(InputBuffers* inputBuffers, NNOutput* currentOutput, const double policyOptimism, size_t row); - -void processPolicy(InputBuffers* inputBuffers, - NNOutput* currentOutput, - const ComputeHandle* gpuHandle, - NNResultBuf* inputBuf, - size_t row); - -void processValue(const InputBuffers* inputBuffers, NNOutput* currentOutput, const size_t row); - -void processOwnership(const InputBuffers* inputBuffers, - NNOutput* currentOutput, - const ComputeHandle* gpuHandle, - const int symmetry, - const 
size_t row); - -void -processScoreValues(const InputBuffers* inputBuffers, NNOutput* currentOutput, const int modelVersion, const size_t row); - -void processRow(size_t row, - const ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - NNResultBuf** inputBufs, - vector& outputs); - -void getCoreMLOutput(ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - int numBatchEltsFilled, - NNResultBuf** inputBufs, - vector& outputs); -}; - -/** - * @brief Represents a loaded neural network model. - * A LoadedModel object contains a ModelDesc object that describes the characteristics of the loaded model. - * For Core ML backend, we also store the model path for on-demand conversion. - */ -struct LoadedModel { - /** - * @brief The description of the loaded model. - */ - ModelDesc modelDesc; - - /** - * @brief Path to the original .bin.gz model file for conversion. - */ - string modelPath; - - /** - * @brief Construct a new Loaded Model object - * This constructor loads a machine learning model from a file and sets the modelDesc field. - * @param fileName The name of the file containing the machine learning model. - * @param expectedSha256 The expected SHA-256 hash of the model file. - */ - LoadedModel(const string& fileName, const string& expectedSha256); - - LoadedModel() = delete; - LoadedModel(const LoadedModel&) = delete; - LoadedModel& operator=(const LoadedModel&) = delete; -}; - -/** - * @brief Context for computing neural network operations using Core ML. - * Contains global configuration settings for neural network computations. - */ -struct ComputeContext { - /** - * @brief Whether to use FP16 mode for computations. - */ - enabled_t useFP16Mode; - - /** - * @brief The width of the neural network input. - */ - int nnXLen; - - /** - * @brief The height of the neural network input. - */ - int nnYLen; - - /** - * @brief Core ML compute context instance from Swift. - */ - CoreMLComputeContext coremlContext; - - /** - * @brief Constructs a ComputeContext object. 
- * @param nnX The width of the input tensor. - * @param nnY The height of the input tensor. - * @param useFP16Mode Whether to use half-precision floating-point (FP16) mode. - * @param useNHWCMode Whether to use the NHWC format for input tensors. - */ - ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode); - - ~ComputeContext(); - ComputeContext() = delete; - ComputeContext(const ComputeContext&) = delete; - ComputeContext& operator=(const ComputeContext&) = delete; -}; - -/** - * @brief A handle for performing neural network computations using Core ML. - * This struct represents a per-thread handle for computing neural network operations. - */ -struct ComputeHandle { - /** - * @brief The x length of the neural network. - */ - int nnXLen; - - /** - * @brief The y length of the neural network. - */ - int nnYLen; - - /** - * @brief The index of the GPU to use for computation. - */ - int gpuIndex; - - /** - * @brief The version of the loaded model. - */ - int version; - - /** - * @brief The version of the metadata encoder. - */ - int metaEncoderVersion; - - /** - * @brief Whether the input data uses NHWC format. - */ - bool inputsUseNHWC; - - /** - * @brief Whether to use 16-bit floating-point precision. - */ - bool useFP16; - - /** - * @brief Whether exact neural net length is required (enables mask optimization). - */ - bool requireExactNNLen; - - /** - * @brief The hybrid compute handle instance from Swift. - * This handle dispatches work to both CoreML (CPU+ANE) and MPSGraph (GPU). - */ - swift::Optional hybridHandle; - - /** - * @brief The MPSGraph-only handle instance from Swift (used for FP32 mode). - * This handle dispatches work only to GPU, avoiding slow FP32 CPU+ANE execution. - */ - swift::Optional mpsGraphOnlyHandle; - - /** - * @brief Construct a new ComputeHandle object. - * @param context The ComputeContext object to use for computation. - * @param loadedModel A pointer to the LoadedModel object. 
- * @param inputsUseNHWC Whether the input data uses NHWC format. - * @param gpuIdx The index of the GPU to use. - * @param serverThreadIdx The index of the server thread. - * @param requireExactNNLen Whether exact NN length is required. - * @param maxBatchSize Maximum batch size for dynamic batch support. - */ - ComputeHandle( - ComputeContext* context, - const LoadedModel* loadedModel, - bool inputsUseNHWC, - int gpuIdx, - int serverThreadIdx, - bool requireExactNNLen, - int maxBatchSize); - - ~ComputeHandle(); - ComputeHandle() = delete; - ComputeHandle(const ComputeHandle&) = delete; - ComputeHandle& operator=(const ComputeHandle&) = delete; -}; - -/** - * @brief Input and output buffers for neural network inference. - */ -struct InputBuffers { - int maxBatchSize; - size_t policyResultChannels; - - size_t singleSpatialElts; - size_t singleInputElts; - size_t singleInputGlobalElts; - size_t singleInputMetaElts; - size_t singlePolicyResultElts; - size_t singlePolicyPassResultElts; - size_t singlePolicyProbsElts; - size_t singleValueResultElts; - size_t singleOwnershipResultElts; - size_t singleOwnerMapElts; - size_t singleScoreValuesResultElts; - size_t singleMaskElts; - - size_t rowSpatialBufferElts; - size_t userInputBufferElts; - size_t userInputGlobalBufferElts; - size_t userInputMetaBufferElts; - size_t policyResultBufferElts; - size_t policyPassResultBufferElts; - size_t policyProbsBufferElts; - size_t valueResultBufferElts; - size_t ownershipResultBufferElts; - size_t ownerMapBufferElts; - size_t scoreValuesResultBufferElts; - size_t userInputMaskBufferElts; - - float* rowSpatialBuffer; - float* userInputBuffer; - float* userInputGlobalBuffer; - float* userInputMetaBuffer; - float* policyResults; - float* policyPassResults; - float* policyProbsBuffer; - float* valueResults; - float* ownershipResults; - float* ownerMapBuffer; - float* scoreValuesResults; - float* userInputMaskBuffer; - - InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int 
nnXLen, int nnYLen); - ~InputBuffers(); - InputBuffers() = delete; - InputBuffers(const InputBuffers&) = delete; - InputBuffers& operator=(const InputBuffers&) = delete; -}; - -#endif // USE_COREML_BACKEND diff --git a/cpp/neuralnet/coremlbackend.swift b/cpp/neuralnet/coremlbackend.swift deleted file mode 100644 index 95200a614..000000000 --- a/cpp/neuralnet/coremlbackend.swift +++ /dev/null @@ -1,1034 +0,0 @@ -import Foundation -import CoreML -import MetalPerformanceShaders -import MetalPerformanceShadersGraph - -/// A class that handles output to standard error. -class StandardError: TextOutputStream { - func write(_ string: String) { - try? FileHandle.standardError.write(contentsOf: Data(string.utf8)) - } -} - -/// Print to standard error -func printError(_ item: Any) { - var instance = StandardError() - print(item, to: &instance) -} - -// NOTE: Model caching and conversion are now handled in C++ using the native katagocoreml library. -// The Python-based CoreMLConverter and ModelCacheManager have been removed to eliminate Python dependency. 
- -/// Context storing board dimensions and settings -public class CoreMLComputeContext { - public let nnXLen: Int32 - public let nnYLen: Int32 - public let useFP16: Bool - - init(nnXLen: Int32, nnYLen: Int32, useFP16: Bool) { - self.nnXLen = nnXLen - self.nnYLen = nnYLen - self.useFP16 = useFP16 - } -} - -/// Create a Core ML compute context -public func createCoreMLComputeContext( - nnXLen: Int32, - nnYLen: Int32, - useFP16: Bool -) -> CoreMLComputeContext { - return CoreMLComputeContext(nnXLen: nnXLen, nnYLen: nnYLen, useFP16: useFP16) -} - -/// Handle that wraps the loaded MLModel for inference -public class CoreMLComputeHandle { - let model: MLModel - let nnXLen: Int32 - let nnYLen: Int32 - let optimizeIdentityMask: Bool - let numInputChannels: Int - let numInputGlobalChannels: Int - let numInputMetaChannels: Int - let numPolicyChannels: Int - let numValueChannels: Int - let numScoreValueChannels: Int - let numOwnershipChannels: Int - - /// Model input/output names matching KataGoCoremltools output - struct IONames { - static let spatialInput = "spatial_input" - static let globalInput = "global_input" - static let inputMask = "input_mask" - static let metaInput = "meta_input" - - static let policyOutput = "policy_p2_conv" - static let policyPassOutput = "policy_pass" - static let valueOutput = "value_v3_bias" - static let ownershipOutput = "value_ownership_conv" - static let scoreValueOutput = "value_sv3_bias" - } - - init(model: MLModel, nnXLen: Int32, nnYLen: Int32, - optimizeIdentityMask: Bool, - numInputChannels: Int, - numInputGlobalChannels: Int, - numInputMetaChannels: Int, - numPolicyChannels: Int, - numValueChannels: Int, - numScoreValueChannels: Int, - numOwnershipChannels: Int) { - self.model = model - self.nnXLen = nnXLen - self.nnYLen = nnYLen - self.optimizeIdentityMask = optimizeIdentityMask - self.numInputChannels = numInputChannels - self.numInputGlobalChannels = numInputGlobalChannels - self.numInputMetaChannels = numInputMetaChannels - 
self.numPolicyChannels = numPolicyChannels - self.numValueChannels = numValueChannels - self.numScoreValueChannels = numScoreValueChannels - self.numOwnershipChannels = numOwnershipChannels - } - - /// Run inference on a batch of inputs - public func apply( - spatialInput: UnsafeMutablePointer, - globalInput: UnsafeMutablePointer, - metaInput: UnsafeMutablePointer, - maskInput: UnsafeMutablePointer, - policy: UnsafeMutablePointer, - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer, - batchSize: Int - ) { - // Process batch elements in parallel using Grand Central Dispatch - // Each inference is independent, reading/writing to different buffer offsets - DispatchQueue.concurrentPerform(iterations: batchSize) { b in - autoreleasepool { - do { - try runSingleInference( - batchIndex: b, - spatialInput: spatialInput, - globalInput: globalInput, - metaInput: metaInput, - maskInput: maskInput, - policy: policy, - policyPass: policyPass, - value: value, - scoreValue: scoreValue, - ownership: ownership - ) - } catch { - printError("Core ML inference error: \(error)") - } - } - } - } - - private func runSingleInference( - batchIndex: Int, - spatialInput: UnsafeMutablePointer, - globalInput: UnsafeMutablePointer, - metaInput: UnsafeMutablePointer, - maskInput: UnsafeMutablePointer, - policy: UnsafeMutablePointer, - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer - ) throws { - let spatialSize = Int(nnXLen) * Int(nnYLen) * numInputChannels - let spatialOffset = batchIndex * spatialSize - - // Create MLMultiArray for spatial input (1, C, H, W) - let spatialArray = try MLMultiArray( - shape: [1, NSNumber(value: numInputChannels), - NSNumber(value: nnYLen), NSNumber(value: nnXLen)], - dataType: .float32) - - // Copy spatial data using fast memcpy - let spatialPtr = 
spatialArray.dataPointer.assumingMemoryBound(to: Float32.self) - memcpy(spatialPtr, spatialInput.advanced(by: spatialOffset), spatialSize * MemoryLayout.size) - - // Create global input array (1, C) - rank 2 as expected by converter - let globalArray = try MLMultiArray( - shape: [1, NSNumber(value: numInputGlobalChannels)], - dataType: .float32) - let globalPtr = globalArray.dataPointer.assumingMemoryBound(to: Float32.self) - let globalOffset = batchIndex * numInputGlobalChannels - memcpy(globalPtr, globalInput.advanced(by: globalOffset), numInputGlobalChannels * MemoryLayout.size) - - // Build feature provider dictionary - var inputDict: [String: MLFeatureValue] = [ - IONames.spatialInput: MLFeatureValue(multiArray: spatialArray), - IONames.globalInput: MLFeatureValue(multiArray: globalArray) - ] - - // Add mask input (always required, even with optimize_identity_mask=True) - // When optimize_identity_mask=True, the mask is still required as input but - // internal mask operations are optimized away for ~6.5% speedup - let maskArray = try MLMultiArray( - shape: [1, 1, NSNumber(value: nnYLen), NSNumber(value: nnXLen)], - dataType: .float32) - let maskPtr = maskArray.dataPointer.assumingMemoryBound(to: Float32.self) - let maskSize = Int(nnXLen) * Int(nnYLen) - let maskOffset = batchIndex * maskSize - memcpy(maskPtr, maskInput.advanced(by: maskOffset), maskSize * MemoryLayout.size) - inputDict[IONames.inputMask] = MLFeatureValue(multiArray: maskArray) - - // Add meta input if model has it - if numInputMetaChannels > 0 { - let metaArray = try MLMultiArray( - shape: [1, NSNumber(value: numInputMetaChannels)], - dataType: .float32) - let metaPtr = metaArray.dataPointer.assumingMemoryBound(to: Float32.self) - let metaOffset = batchIndex * numInputMetaChannels - memcpy(metaPtr, metaInput.advanced(by: metaOffset), numInputMetaChannels * MemoryLayout.size) - inputDict[IONames.metaInput] = MLFeatureValue(multiArray: metaArray) - } - - // Run prediction - let featureProvider 
= try MLDictionaryFeatureProvider(dictionary: inputDict) - let prediction = try model.prediction(from: featureProvider) - - // Extract outputs and copy to output buffers - extractOutputs( - prediction: prediction, - batchIndex: batchIndex, - policy: policy, - policyPass: policyPass, - value: value, - scoreValue: scoreValue, - ownership: ownership - ) - } - - /// Copy MLMultiArray data to destination buffer, respecting strides. - /// Core ML may return non-contiguous arrays, especially for spatial outputs after GPU computation. - private func copyMultiArray( - _ array: MLMultiArray, - to dest: UnsafeMutablePointer, - destOffset: Int - ) { - let shape = array.shape.map { $0.intValue } - let strides = array.strides.map { $0.intValue } - let ptr = array.dataPointer.assumingMemoryBound(to: Float32.self) - let totalElements = shape.reduce(1, *) - - // Check if contiguous (strides match expected for row-major C-order) - var isContiguous = true - var expectedStride = 1 - for i in (0...size) - } else { - // Slow path: copy with strides (handles non-contiguous layouts) - copyWithStrides( - from: ptr, - to: dest, - destOffset: destOffset, - shape: shape, - strides: strides, - dim: 0, - srcOffset: 0, - destIdx: 0 - ) - } - } - - /// Recursively copy array elements respecting strides (NCHW order) - @discardableResult - private func copyWithStrides( - from src: UnsafePointer, - to dest: UnsafeMutablePointer, - destOffset: Int, - shape: [Int], - strides: [Int], - dim: Int, - srcOffset: Int, - destIdx: Int - ) -> Int { - var currentDestIdx = destIdx - - if dim == shape.count - 1 { - // Innermost dimension: copy elements - for i in 0.., - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer - ) { - // Extract policy output (1, policyChannels, H, W) - // Must use stride-aware copy as Core ML may return non-contiguous arrays - if let policyArray = prediction.featureValue(for: 
IONames.policyOutput)?.multiArrayValue { - let policyOffset = batchIndex * Int(nnXLen) * Int(nnYLen) * numPolicyChannels - copyMultiArray(policyArray, to: policy, destOffset: policyOffset) - } - - // Extract policy pass output (1, numPolicyChannels) - if let passArray = prediction.featureValue(for: IONames.policyPassOutput)?.multiArrayValue { - let passOffset = batchIndex * numPolicyChannels - copyMultiArray(passArray, to: policyPass, destOffset: passOffset) - } - - // Extract value output (1, 3) - if let valueArray = prediction.featureValue(for: IONames.valueOutput)?.multiArrayValue { - let valueOffset = batchIndex * numValueChannels - copyMultiArray(valueArray, to: value, destOffset: valueOffset) - } - - // Extract score value output (1, numScoreValueChannels) - if let svArray = prediction.featureValue(for: IONames.scoreValueOutput)?.multiArrayValue { - let svOffset = batchIndex * numScoreValueChannels - copyMultiArray(svArray, to: scoreValue, destOffset: svOffset) - } - - // Extract ownership output (1, 1, H, W) - // Must use stride-aware copy as Core ML may return non-contiguous arrays - if let ownArray = prediction.featureValue(for: IONames.ownershipOutput)?.multiArrayValue { - let ownOffset = batchIndex * Int(nnXLen) * Int(nnYLen) * numOwnershipChannels - copyMultiArray(ownArray, to: ownership, destOffset: ownOffset) - } - } -} - -/// Delete the source .mlpackage after compilation -/// CoreML caches the compiled model, so the source is no longer needed -private func deleteSourceModel(at url: URL, serverThreadIdx: Int) { - do { - try FileManager.default.removeItem(at: url) - printError("Core ML backend \(serverThreadIdx): Deleted temp model") - } catch { - printError("Core ML backend \(serverThreadIdx): Warning: Failed to delete temp model: \(error)") - } -} - -/// Create compute handle - loads pre-converted Core ML model -/// Model conversion is now handled in C++ using the native katagocoreml library -public func createCoreMLComputeHandle( - coremlModelPath: 
String, - serverThreadIdx: Int, - requireExactNNLen: Bool, - numInputChannels: Int32, - numInputGlobalChannels: Int32, - numInputMetaChannels: Int32, - numPolicyChannels: Int32, - numValueChannels: Int32, - numScoreValueChannels: Int32, - numOwnershipChannels: Int32, - context: CoreMLComputeContext -) -> CoreMLComputeHandle? { - - let optimizeMask = requireExactNNLen // When true: skips internal mask operations (~6.5% speedup) - let mlpackagePath = URL(fileURLWithPath: coremlModelPath) - - // Ensure temp file is deleted regardless of success/failure - defer { deleteSourceModel(at: mlpackagePath, serverThreadIdx: serverThreadIdx) } - - // Load Core ML model (already converted by C++ katagocoreml library) - do { - let config = MLModelConfiguration() - config.computeUnits = .cpuAndNeuralEngine // Exclude GPU for hybrid mode - - printError("Core ML backend \(serverThreadIdx): Compiling model...") - let compiledURL = try MLModel.compileModel(at: mlpackagePath) - - printError("Core ML backend \(serverThreadIdx): Loading compiled model...") - let model = try MLModel(contentsOf: compiledURL, configuration: config) - - printError("Core ML backend \(serverThreadIdx): Model loaded successfully, \(context.nnXLen)x\(context.nnYLen)") - - return CoreMLComputeHandle( - model: model, - nnXLen: context.nnXLen, - nnYLen: context.nnYLen, - optimizeIdentityMask: optimizeMask, - numInputChannels: Int(numInputChannels), - numInputGlobalChannels: Int(numInputGlobalChannels), - numInputMetaChannels: Int(numInputMetaChannels), - numPolicyChannels: Int(numPolicyChannels), - numValueChannels: Int(numValueChannels), - numScoreValueChannels: Int(numScoreValueChannels), - numOwnershipChannels: Int(numOwnershipChannels) - ) - } catch { - printError("Core ML backend: Failed to load model: \(error)") - return nil - } -} - -/// Print available Core ML compute units -public func printCoreMLDevices() { - printError("Core ML backend: Hybrid mode - CoreML (CPU+ANE) + MPSGraph (GPU)") -} - -// MARK: - 
Throughput Tracker for Adaptive Batch Sizing - -/// Tracks throughput for CoreML and MPSGraph paths to adaptively adjust batch split ratio. -/// -/// # Thread Safety -/// -/// This class is thread-safe by design without requiring explicit locks: -/// -/// 1. **Single-Owner Access**: Each server thread owns its own `ComputeHandle` → -/// `HybridComputeHandle` → `ThroughputTracker` instance. There is no sharing -/// of `ThroughputTracker` instances between server threads. -/// -/// 2. **Disjoint Field Access**: Within a single `HybridComputeHandle.apply()` call, -/// concurrent dispatch queues access disjoint fields: -/// - `coremlQueue.async` calls `updateCoreML()` → writes `coreMLSamplesPerSec`, `totalCoreMLSamples` -/// - `mpsGraphQueue.async` calls `updateMPSGraph()` → writes `mpsGraphSamplesPerSec`, `totalMPSGraphSamples` -/// -/// Both read `warmupComplete`, `stableAlpha`, and `warmupAlpha`, but these are either -/// `let` constants or only written sequentially after `group.wait()`. -/// -/// 3. **Sequential Barrier**: `group.wait()` in `apply()` ensures all concurrent throughput -/// updates complete before `recordBatch()`, `shouldLogAndMark()`, or `getDiagnosticStats()` -/// are called. These methods run sequentially on the calling thread. -/// -/// Because of these invariants, no locks are needed. Removing `NSLock` was intentional -/// as it was unnecessary overhead given the access patterns above. 
-public class ThroughputTracker { - private var coreMLSamplesPerSec: Double = 0.9 // Warm-start: initial ratio ~0.47 (closer to optimal ~0.45) - private var mpsGraphSamplesPerSec: Double = 1.0 - private let alpha: Double = 0.15 // EMA smoothing factor (lower = more stable for noisy workloads) - - // Diagnostic fields - private var batchCount: Int = 0 - private var totalCoreMLSamples: Int = 0 - private var totalMPSGraphSamples: Int = 0 - private var ratioHistory: [Float] = [] - private let maxHistorySize = 100 // Keep last 100 ratios for analysis - private var lastLogBatchCount: Int = 0 - private let logInterval: Int = 50 // Log every N batches - - // Adaptive alpha parameters - private var warmupComplete: Bool = false - private let warmupAlpha: Double = 0.25 // Faster adaptation during warmup - private let stableAlpha: Double = 0.10 // Slower adaptation after convergence - private let warmupBatches: Int = 100 // Min batches before checking warmup transition - private let warmupVarianceThreshold: Double = 0.005 // Variance threshold for warmup completion - - /// Update CoreML throughput measurement with adaptive alpha - public func updateCoreML(samples: Int, duration: TimeInterval) { - guard duration > 0, samples > 0 else { return } - let newRate = Double(samples) / duration - let effectiveAlpha = warmupComplete ? stableAlpha : warmupAlpha - coreMLSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * coreMLSamplesPerSec - totalCoreMLSamples += samples - } - - /// Update MPSGraph throughput measurement with adaptive alpha - public func updateMPSGraph(samples: Int, duration: TimeInterval) { - guard duration > 0, samples > 0 else { return } - let newRate = Double(samples) / duration - let effectiveAlpha = warmupComplete ? 
stableAlpha : warmupAlpha - mpsGraphSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * mpsGraphSamplesPerSec - totalMPSGraphSamples += samples - } - - /// Get optimal CoreML ratio (0.0 to 1.0) based on measured throughput - public func getOptimalCoreMLRatio() -> Float { - let total = coreMLSamplesPerSec + mpsGraphSamplesPerSec - return total > 0 ? Float(coreMLSamplesPerSec / total) : 0.5 - } - - /// Get current throughput stats for logging - public func getStats() -> (coreML: Double, mpsGraph: Double, ratio: Float) { - return (coreMLSamplesPerSec, mpsGraphSamplesPerSec, getOptimalCoreMLRatio()) - } - - /// Record a batch for diagnostics (call after each apply) - public func recordBatch(ratio: Float) { - batchCount += 1 - if ratioHistory.count >= maxHistorySize { - ratioHistory.removeFirst() - } - ratioHistory.append(ratio) - // Check warmup transition - if !warmupComplete && batchCount >= warmupBatches && computeRatioVariance() < Float(warmupVarianceThreshold) { - warmupComplete = true - } - } - - /// Check if logging should occur this batch, and if so, mark as logged - /// Returns true if logging should occur (atomically checks and marks) - public func shouldLogAndMark() -> Bool { - if batchCount - lastLogBatchCount >= logInterval { - lastLogBatchCount = batchCount - return true - } - return false - } - - /// Get diagnostic stats for logging - public func getDiagnosticStats() -> ( - batchCount: Int, - coreMLSamplesPerSec: Double, - mpsGraphSamplesPerSec: Double, - ratio: Float, - totalCoreMLSamples: Int, - totalMPSGraphSamples: Int, - ratioVariance: Float - ) { - return ( - batchCount, - coreMLSamplesPerSec, - mpsGraphSamplesPerSec, - getOptimalCoreMLRatio(), - totalCoreMLSamples, - totalMPSGraphSamples, - computeRatioVariance() - ) - } - - /// Compute variance of recent ratios - private func computeRatioVariance() -> Float { - guard ratioHistory.count >= 10 else { return 0.0 } - let recentRatios = Array(ratioHistory.suffix(20)) - let mean = 
recentRatios.reduce(0.0, +) / Float(recentRatios.count) - let variance = recentRatios.map { ($0 - mean) * ($0 - mean) }.reduce(0.0, +) / Float(recentRatios.count) - return variance - } - - /// Check if ratio has converged (variance < threshold) - public func hasConverged(threshold: Float = 0.001) -> Bool { - let variance = computeRatioVariance() - return ratioHistory.count >= 20 && variance < threshold - } -} - -// MARK: - MPSGraph-based Model for GPU Inference - -/// GPU-based model using MPSGraph for inference -public class MPSGraphModelHandle { - let device: MTLDevice - let commandQueue: MTLCommandQueue - let graph: MPSGraph - let nnXLen: Int32 - let nnYLen: Int32 - let numInputChannels: Int - let numInputGlobalChannels: Int - let numInputMetaChannels: Int - let numPolicyChannels: Int - let numValueChannels: Int - let numScoreValueChannels: Int - let numOwnershipChannels: Int - - // Layers - let input: InputLayer - let inputGlobal: InputGlobalLayer - let inputMeta: InputMetaLayer - let mask: MaskLayer - let trunk: Trunk - let policyHead: PolicyHead - let valueHead: ValueHead - let targetTensors: [MPSGraphTensor] - - public init?( - modelDesc: SWModelDesc, - nnXLen: Int32, - nnYLen: Int32, - optimizeIdentityMask: Bool = false - ) { - guard let device = MTLCreateSystemDefaultDevice() else { - printError("MPSGraph backend: Failed to create Metal device") - return nil - } - - self.device = device - guard let queue = device.makeCommandQueue() else { - printError("MPSGraph backend: Failed to create command queue") - return nil - } - self.commandQueue = queue - self.graph = MPSGraph() - self.nnXLen = nnXLen - self.nnYLen = nnYLen - self.numInputChannels = modelDesc.numInputChannels.intValue - self.numInputGlobalChannels = modelDesc.numInputGlobalChannels.intValue - self.numInputMetaChannels = modelDesc.numInputMetaChannels.intValue - self.numPolicyChannels = modelDesc.numPolicyChannels.intValue - self.numValueChannels = modelDesc.numValueChannels.intValue - 
self.numScoreValueChannels = modelDesc.numScoreValueChannels.intValue - self.numOwnershipChannels = modelDesc.numOwnershipChannels.intValue - - let nnXLenNS = nnXLen as NSNumber - let nnYLenNS = nnYLen as NSNumber - - input = InputLayer( - graph: graph, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS, - numChannels: modelDesc.numInputChannels) - - inputGlobal = InputGlobalLayer( - graph: graph, - numGlobalFeatures: modelDesc.numInputGlobalChannels) - - inputMeta = InputMetaLayer( - graph: graph, - numMetaFeatures: modelDesc.numInputMetaChannels) - - mask = MaskLayer( - graph: graph, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS) - - // Use constant tensors when mask is all 1s (requireExactNNLen=true) - let maskSum: MaskSumLayer - let maskSumSqrtS14M01: MaskSumSqrtS14M01Layer - let maskSumSqrtS14M01SquareS01: MaskSumSqrtS14M01SquareS01Layer - - if optimizeIdentityMask { - maskSum = MaskSumLayer( - graph: graph, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS) - maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( - graph: graph, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS) - maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( - graph: graph, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS) - } else { - maskSum = MaskSumLayer( - graph: graph, - maskTensor: mask.tensor) - maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( - graph: graph, - maskSum: maskSum) - maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( - graph: graph, - maskSumSqrtS14M01: maskSumSqrtS14M01) - } - - trunk = Trunk( - graph: graph, - descriptor: modelDesc.trunk, - inputTensor: input.tensor, - inputGlobalTensor: inputGlobal.tensor, - inputMetaTensor: inputMeta.tensor, - maskTensor: mask.tensor, - maskSumTensor: maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS, - optimizeIdentityMask: optimizeIdentityMask) - - policyHead = PolicyHead( - graph: graph, - descriptor: modelDesc.policyHead, - sourceTensor: trunk.resultTensor, - maskTensor: mask.tensor, - maskSumTensor: 
maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS, - optimizeIdentityMask: optimizeIdentityMask) - - valueHead = ValueHead( - graph: graph, - descriptor: modelDesc.valueHead, - sourceTensor: trunk.resultTensor, - maskTensor: mask.tensor, - maskSumTensor: maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, - maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01.tensor, - nnXLen: nnXLenNS, - nnYLen: nnYLenNS, - optimizeIdentityMask: optimizeIdentityMask) - - targetTensors = [ - policyHead.policyTensor, - policyHead.policyPassTensor, - valueHead.valueTensor, - valueHead.scoreValueTensor, - valueHead.ownershipTensor, - ] - - printError("MPSGraph backend: Initialized on \(device.name)\(optimizeIdentityMask ? " (mask optimized)" : "")") - } - - /// Run inference on a batch using MPSGraph (GPU) - public func apply( - input inputPointer: UnsafeMutablePointer, - inputGlobal inputGlobalPointer: UnsafeMutablePointer, - inputMeta inputMetaPointer: UnsafeMutablePointer, - policy: UnsafeMutablePointer, - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer, - batchSize: Int - ) { - let channelAxis = InputShape.getChannelAxis() - let numInputChannels = input.shape[channelAxis] - let nnXLenNS = nnXLen as NSNumber - let nnYLenNS = nnYLen as NSNumber - - let inputShape = InputShape.create( - batchSize: batchSize as NSNumber, - numChannels: numInputChannels, - nnYLen: nnYLenNS, - nnXLen: nnXLenNS) - - let inputDescriptor = MPSNDArrayDescriptor( - dataType: input.tensor.dataType, - shape: inputShape) - - let inputArray = MPSNDArray( - device: device, - descriptor: inputDescriptor) - - inputArray.writeBytes(inputPointer) - - let numInputGlobalChannels = inputGlobal.shape[channelAxis] - - let inputGlobalShape = InputShape.create( - batchSize: batchSize as NSNumber, - numChannels: numInputGlobalChannels, - nnYLen: 1, - nnXLen: 
1) - - let inputGlobalDescriptor = MPSNDArrayDescriptor( - dataType: inputGlobal.tensor.dataType, - shape: inputGlobalShape) - - let inputGlobalArray = MPSNDArray( - device: device, - descriptor: inputGlobalDescriptor) - - inputGlobalArray.writeBytes(inputGlobalPointer) - - let numInputMetaChannels = inputMeta.shape[channelAxis] - - let inputMetaShape = InputShape.create( - batchSize: batchSize as NSNumber, - numChannels: numInputMetaChannels, - nnYLen: 1, - nnXLen: 1) - - let inputMetaDescriptor = MPSNDArrayDescriptor( - dataType: inputMeta.tensor.dataType, - shape: inputMetaShape) - - let inputMetaArray = MPSNDArray( - device: device, - descriptor: inputMetaDescriptor) - - inputMetaArray.writeBytes(inputMetaPointer) - - let maskShape = InputShape.create( - batchSize: batchSize as NSNumber, - numChannels: 1, - nnYLen: nnYLenNS, - nnXLen: nnXLenNS) - - let maskDescriptor = MPSNDArrayDescriptor( - dataType: mask.tensor.dataType, - shape: maskShape) - - let maskArray = MPSNDArray( - device: device, - descriptor: maskDescriptor) - - // Extract mask from first channel of spatial input - var maskStrideArray = [ - MemoryLayout.size, - Int(nnXLen) * MemoryLayout.size, - Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, - numInputChannels.intValue * Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, - ] - - maskArray.writeBytes(inputPointer, strideBytes: &maskStrideArray) - - let feeds = [ - input.tensor: MPSGraphTensorData(inputArray), - inputGlobal.tensor: MPSGraphTensorData(inputGlobalArray), - inputMeta.tensor: MPSGraphTensorData(inputMetaArray), - mask.tensor: MPSGraphTensorData(maskArray), - ] - - let fetch = graph.run( - with: commandQueue, - feeds: feeds, - targetTensors: targetTensors, - targetOperations: nil) - - fetch[policyHead.policyTensor]?.mpsndarray().readBytes(policy) - fetch[policyHead.policyPassTensor]?.mpsndarray().readBytes(policyPass) - fetch[valueHead.valueTensor]?.mpsndarray().readBytes(value) - 
fetch[valueHead.scoreValueTensor]?.mpsndarray().readBytes(scoreValue) - fetch[valueHead.ownershipTensor]?.mpsndarray().readBytes(ownership) - } -} - -// MARK: - Hybrid Compute Handle - -/// Global flag to enable/disable diagnostic logging (set via environment variable) -private let diagnosticLoggingEnabled: Bool = { - if let envValue = ProcessInfo.processInfo.environment["KATAGO_HYBRID_DIAG"] { - return envValue.lowercased() == "1" || envValue.lowercased() == "true" - } - return false -}() - -/// Hybrid compute handle that dispatches to both CoreML (CPU+ANE) and MPSGraph (GPU) -public class HybridComputeHandle { - let coremlHandle: CoreMLComputeHandle - let mpsGraphHandle: MPSGraphModelHandle - let throughputTracker: ThroughputTracker - let coremlQueue: DispatchQueue - let mpsGraphQueue: DispatchQueue - let nnXLen: Int32 - let nnYLen: Int32 - let serverThreadIdx: Int - - public init( - coremlHandle: CoreMLComputeHandle, - mpsGraphHandle: MPSGraphModelHandle, - serverThreadIdx: Int = 0 - ) { - self.coremlHandle = coremlHandle - self.mpsGraphHandle = mpsGraphHandle - self.serverThreadIdx = serverThreadIdx - self.throughputTracker = ThroughputTracker() - self.coremlQueue = DispatchQueue(label: "com.katago.coreml", qos: .userInitiated) - self.mpsGraphQueue = DispatchQueue(label: "com.katago.mpsgraph", qos: .userInitiated) - self.nnXLen = coremlHandle.nnXLen - self.nnYLen = coremlHandle.nnYLen - } - - /// Run hybrid inference - splits batch between CoreML and MPSGraph - public func apply( - spatialInput: UnsafeMutablePointer, - globalInput: UnsafeMutablePointer, - metaInput: UnsafeMutablePointer, - maskInput: UnsafeMutablePointer, - policy: UnsafeMutablePointer, - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer, - batchSize: Int - ) { - // Get optimal split ratio based on throughput - let ratio = throughputTracker.getOptimalCoreMLRatio() - let coreMLBatchSize = max(1, min(batchSize - 
1, Int(Float(batchSize) * ratio))) - let mpsGraphBatchSize = batchSize - coreMLBatchSize - - // Calculate buffer offsets - let spatialSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numInputChannels - let globalSize = coremlHandle.numInputGlobalChannels - let metaSize = coremlHandle.numInputMetaChannels - let policySize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numPolicyChannels - let valueSize = coremlHandle.numValueChannels - let scoreValueSize = coremlHandle.numScoreValueChannels - let ownershipSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numOwnershipChannels - - let group = DispatchGroup() - - // CoreML path (CPU + ANE) - if coreMLBatchSize > 0 { - group.enter() - coremlQueue.async { [self] in - let start = CFAbsoluteTimeGetCurrent() - - coremlHandle.apply( - spatialInput: spatialInput, - globalInput: globalInput, - metaInput: metaInput, - maskInput: maskInput, - policy: policy, - policyPass: policyPass, - value: value, - scoreValue: scoreValue, - ownership: ownership, - batchSize: coreMLBatchSize - ) - - let duration = CFAbsoluteTimeGetCurrent() - start - throughputTracker.updateCoreML(samples: coreMLBatchSize, duration: duration) - group.leave() - } - } - - // MPSGraph path (GPU) - if mpsGraphBatchSize > 0 { - group.enter() - mpsGraphQueue.async { [self] in - let start = CFAbsoluteTimeGetCurrent() - - // Offset pointers for MPSGraph batch portion - let spatialOffset = coreMLBatchSize * spatialSize - let globalOffset = coreMLBatchSize * globalSize - let metaOffset = coreMLBatchSize * metaSize - let policyOffset = coreMLBatchSize * policySize - let valueOffset = coreMLBatchSize * valueSize - let scoreValueOffset = coreMLBatchSize * scoreValueSize - let ownershipOffset = coreMLBatchSize * ownershipSize - - autoreleasepool { - mpsGraphHandle.apply( - input: spatialInput.advanced(by: spatialOffset), - inputGlobal: globalInput.advanced(by: globalOffset), - inputMeta: metaInput.advanced(by: metaOffset), - policy: policy.advanced(by: policyOffset), - policyPass: 
policyPass.advanced(by: coreMLBatchSize * coremlHandle.numPolicyChannels), - value: value.advanced(by: valueOffset), - scoreValue: scoreValue.advanced(by: scoreValueOffset), - ownership: ownership.advanced(by: ownershipOffset), - batchSize: mpsGraphBatchSize - ) - } - - let duration = CFAbsoluteTimeGetCurrent() - start - throughputTracker.updateMPSGraph(samples: mpsGraphBatchSize, duration: duration) - group.leave() - } - } - - // Wait for both paths to complete - group.wait() - - // Record batch for diagnostics - throughputTracker.recordBatch(ratio: ratio) - - // Periodic diagnostic logging - if diagnosticLoggingEnabled && throughputTracker.shouldLogAndMark() { - let stats = throughputTracker.getDiagnosticStats() - let converged = throughputTracker.hasConverged() - print(String(format: "[HybridDiag T%d] batch=%d ratio=%.3f coreml=%.1f/s mps=%.1f/s total=%d/%d var=%.5f conv=%@", - serverThreadIdx, - stats.batchCount, - stats.ratio, - stats.coreMLSamplesPerSec, - stats.mpsGraphSamplesPerSec, - stats.totalCoreMLSamples, - stats.totalMPSGraphSamples, - stats.ratioVariance, - converged ? "yes" : "no")) - } - } -} - -/// Create a hybrid compute handle -public func createHybridComputeHandle( - coremlModelPath: String, - modelDesc: SWModelDesc, - serverThreadIdx: Int, - requireExactNNLen: Bool, - numInputChannels: Int32, - numInputGlobalChannels: Int32, - numInputMetaChannels: Int32, - numPolicyChannels: Int32, - numValueChannels: Int32, - numScoreValueChannels: Int32, - numOwnershipChannels: Int32, - context: CoreMLComputeContext -) -> HybridComputeHandle? 
{ - - // Create CoreML handle (CPU + ANE) - guard let coremlHandle = createCoreMLComputeHandle( - coremlModelPath: coremlModelPath, - serverThreadIdx: serverThreadIdx, - requireExactNNLen: requireExactNNLen, - numInputChannels: numInputChannels, - numInputGlobalChannels: numInputGlobalChannels, - numInputMetaChannels: numInputMetaChannels, - numPolicyChannels: numPolicyChannels, - numValueChannels: numValueChannels, - numScoreValueChannels: numScoreValueChannels, - numOwnershipChannels: numOwnershipChannels, - context: context - ) else { - printError("Core ML backend \(serverThreadIdx): Failed to create CoreML handle") - return nil - } - - // Create MPSGraph handle (GPU) - guard let mpsGraphHandle = MPSGraphModelHandle( - modelDesc: modelDesc, - nnXLen: context.nnXLen, - nnYLen: context.nnYLen, - optimizeIdentityMask: requireExactNNLen - ) else { - printError("Core ML backend \(serverThreadIdx): Failed to create MPSGraph handle") - return nil - } - - printError("Core ML backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") - - // Log if diagnostic mode is enabled - if diagnosticLoggingEnabled { - printError("Core ML backend \(serverThreadIdx): Diagnostic logging enabled (KATAGO_HYBRID_DIAG=1)") - } - - return HybridComputeHandle( - coremlHandle: coremlHandle, - mpsGraphHandle: mpsGraphHandle, - serverThreadIdx: serverThreadIdx - ) -} - -/// Create a GPU-only compute handle using MPSGraph -/// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE -public func createMPSGraphOnlyHandle( - modelDesc: SWModelDesc, - serverThreadIdx: Int, - requireExactNNLen: Bool, - context: CoreMLComputeContext -) -> MPSGraphModelHandle? 
{ - guard let mpsGraphHandle = MPSGraphModelHandle( - modelDesc: modelDesc, - nnXLen: context.nnXLen, - nnYLen: context.nnYLen, - optimizeIdentityMask: requireExactNNLen - ) else { - printError("Core ML backend \(serverThreadIdx): Failed to create MPSGraph handle") - return nil - } - - printError("Core ML backend \(serverThreadIdx): Initialized MPSGraph GPU-only mode") - return mpsGraphHandle -} diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index 58ff2c4a3..76e236537 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -5,62 +5,156 @@ #include "../neuralnet/nninputs.h" #include "../neuralnet/nninterface.h" #include "../neuralnet/metalbackend.h" -#include "../core/test.h" -/// Converts a ConvLayerDesc instance from C++ to Swift by creating a new SWConvLayerDesc instance with the same properties. -/// - Parameter desc: The ConvLayerDesc instance to convert. -/// - Returns: A SWConvLayerDesc instance with the same properties as the input ConvLayerDesc. -SWConvLayerDesc MetalProcess::convLayerDescToSwift(const ConvLayerDesc * desc) { +#include +#include +#include +#include +#include +#include // For getpid() - SWConvLayerDesc swDesc = createSWConvLayerDesc(desc->convYSize, - desc->convXSize, - desc->inChannels, - desc->outChannels, - desc->dilationY, - desc->dilationX, - (float*)desc->weights.data()); +using namespace std; - return swDesc; +//------------------------------------------------------------------------------ +// CoreML Model Conversion - Native C++ using katagocoreml library +//------------------------------------------------------------------------------ + +namespace gfs = ghc::filesystem; + +// Minimum batch sizes for hybrid execution mode. +// Hybrid splits batches between CoreML (CPU+ANE) and MPSGraph (GPU). +// When batch is too small to split, prefer MPSGraph-only for stability: +// MPSGraph has more predictable latency and avoids CoreML dispatch overhead. 
+static constexpr int MIN_COREML_BATCH = 1; +static constexpr int MIN_MPSGRAPH_BATCH = 1; + +namespace CoreMLConversion { + +// Get temp directory for model conversion +static string getTempDirectory() { + gfs::path tempDir = gfs::temp_directory_path() / "katago_coreml"; + std::error_code ec; + gfs::create_directories(tempDir, ec); + if(ec) { + throw runtime_error("Failed to create temp directory: " + ec.message()); + } + return tempDir.string(); } -/// Converts a BatchNormLayerDesc instance from C++ to Swift by creating a new SWBatchNormLayerDesc instance with the same properties. -/// - Parameter desc: The BatchNormLayerDesc instance to convert. -/// - Returns: A SWBatchNormLayerDesc instance with the same properties as the input BatchNormLayerDesc. -SWBatchNormLayerDesc MetalProcess::batchNormLayerDescToSwift(const BatchNormLayerDesc * desc) { +// Generate unique temporary path for model conversion +static string generateTempPath(int serverThreadIdx) { + auto now = chrono::steady_clock::now().time_since_epoch().count(); + return getTempDirectory() + "/model_" + to_string(getpid()) + "_" + + to_string(serverThreadIdx) + "_" + to_string(now) + ".mlpackage"; +} - SWBatchNormLayerDesc swDesc = - createSWBatchNormLayerDesc(desc->numChannels, - (float*)desc->mergedScale.data(), - (float*)desc->mergedBias.data()); +// CoreML model metadata constants +static const string COREML_MODEL_AUTHOR = "KataGo"; +static const string COREML_MODEL_LICENSE = "See original model file for license terms"; - return swDesc; +// Convert KataGo model to CoreML in temp directory, returns path to .mlpackage +// The caller (Swift side) is responsible for deleting the temp file after loading +static string convertModelToTemp( + const string& modelPath, + int boardX, + int boardY, + bool useFP16, + bool optimizeMask, + int maxBatchSize, + int serverThreadIdx +) { + // maxBatchSize is validated upstream: cfg.getInt("nnMaxBatchSize", 1, 65536) in setup.cpp + // and NNEvaluator constructor throws 
if maxBatchSize <= 0. Assert for defensive documentation. + assert(maxBatchSize >= 1); + + string tempPath = generateTempPath(serverThreadIdx); + cerr << "Metal backend " << serverThreadIdx << ": Converting model to " << tempPath << endl; + + katagocoreml::ConversionOptions opts; + opts.board_x_size = boardX; + opts.board_y_size = boardY; + opts.compute_precision = useFP16 ? "FLOAT16" : "FLOAT32"; + opts.optimize_identity_mask = optimizeMask; + opts.min_batch_size = 1; + opts.max_batch_size = maxBatchSize; + opts.author = COREML_MODEL_AUTHOR; + opts.license = COREML_MODEL_LICENSE; + + try { + katagocoreml::KataGoConverter::convert(modelPath, tempPath, opts); + } catch(const exception& e) { + // Clean up partial conversion on failure + std::error_code ec; + gfs::remove_all(tempPath, ec); + if(ec) { + cerr << "Metal backend " << serverThreadIdx << ": Warning: Failed to clean up partial conversion at " << tempPath << ": " << ec.message() << endl; + } + throw runtime_error(string("Metal backend ") + to_string(serverThreadIdx) + ": Core ML model conversion failed: " + e.what()); + } + + cerr << "Metal backend " << serverThreadIdx << ": Conversion completed" << endl; + return tempPath; } -/// Convert an activation layer description from C++ to Swift -/// - Parameter desc: An activation layer description -ActivationKind MetalProcess::activationLayerDescToSwift(const ActivationLayerDesc * desc) { +} // namespace CoreMLConversion + +//------------------------------------------------------------------------------ +// Model Descriptor Conversion - C++ to Swift types for MPSGraph +//------------------------------------------------------------------------------ + +namespace MetalProcess { + +/// Converts a ConvLayerDesc instance from C++ to Swift +SWConvLayerDesc convLayerDescToSwift(const ConvLayerDesc* desc) { + return createSWConvLayerDesc( + desc->convYSize, + desc->convXSize, + desc->inChannels, + desc->outChannels, + desc->dilationY, + desc->dilationX, + 
(float*)desc->weights.data()); +} - switch (desc->activation) { +/// Converts a BatchNormLayerDesc instance from C++ to Swift +SWBatchNormLayerDesc batchNormLayerDescToSwift(const BatchNormLayerDesc* desc) { + return createSWBatchNormLayerDesc( + desc->numChannels, + (float*)desc->mergedScale.data(), + (float*)desc->mergedBias.data()); +} + +/// Convert an activation layer description from C++ to Swift +ActivationKind activationLayerDescToSwift(const ActivationLayerDesc* desc) { + switch(desc->activation) { case ACTIVATION_RELU: return ActivationKind::relu(); case ACTIVATION_MISH: return ActivationKind::mish(); case ACTIVATION_MISH_SCALE8: - testAssert(false); // Metal does not use scaled mish activations due to no fp16 - return ActivationKind::identity(); // Placeholder for compilation + return ActivationKind::identity(); // Metal/CoreML does not use scaled mish case ACTIVATION_IDENTITY: return ActivationKind::identity(); default: - testAssert(false); - return ActivationKind::identity(); // Placeholder for compilation + return ActivationKind::identity(); } } -/// Convert a residual block description from C++ to Swift -/// - Parameter desc: A residual block description -/// - Returns: The residual block description converted to SWResidualBlockDesc -SWResidualBlockDesc MetalProcess::residualBlockDescToSwift(const ResidualBlockDesc * desc) { +/// Convert a matrix multiplication layer description from C++ to Swift +SWMatMulLayerDesc matMulLayerDescToSwift(const MatMulLayerDesc* desc) { + return createSWMatMulLayerDesc( + desc->inChannels, + desc->outChannels, + (float*)desc->weights.data()); +} +/// Convert a matrix bias layer description from C++ to Swift +SWMatBiasLayerDesc matBiasLayerDescToSwift(const MatBiasLayerDesc* desc) { + return createSWMatBiasLayerDesc(desc->numChannels, (float*)desc->weights.data()); +} + +/// Convert a residual block description from C++ to Swift +SWResidualBlockDesc residualBlockDescToSwift(const ResidualBlockDesc* desc) { 
SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); @@ -68,34 +162,17 @@ SWResidualBlockDesc MetalProcess::residualBlockDescToSwift(const ResidualBlockDe ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); - SWResidualBlockDesc swDesc = - createSWResidualBlockDesc(preBN, - preActivationKind, - regularConv, - midBN, - midActivationKind, - finalConv); - - return swDesc; -} - -/// Convert a matrix multiplication layer description from C++ to Swift -/// - Parameter desc: A matrix multiplication layer description -/// - Returns: The matrix multiplication layer description converted to SWMatMulLayerDesc -SWMatMulLayerDesc MetalProcess::matMulLayerDescToSwift(const MatMulLayerDesc * desc) { - - SWMatMulLayerDesc swDesc = createSWMatMulLayerDesc(desc->inChannels, - desc->outChannels, - (float*)desc->weights.data()); - - return swDesc; + return createSWResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + midBN, + midActivationKind, + finalConv); } /// Convert a global pooling residual block description from C++ to Swift -/// - Parameter desc: A global pooling residual block description -/// - Returns: The global pooling residual block description converted to SWGlobalPoolingResidualBlockDesc -SWGlobalPoolingResidualBlockDesc MetalProcess::globalPoolingResidualBlockDescToSwift(const GlobalPoolingResidualBlockDesc* desc) { - +SWGlobalPoolingResidualBlockDesc globalPoolingResidualBlockDescToSwift(const GlobalPoolingResidualBlockDesc* desc) { SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); SWConvLayerDesc regularConv = convLayerDescToSwift(&desc->regularConv); @@ -107,37 +184,53 @@ 
SWGlobalPoolingResidualBlockDesc MetalProcess::globalPoolingResidualBlockDescToS ActivationKind midActivationKind = activationLayerDescToSwift(&desc->midActivation); SWConvLayerDesc finalConv = convLayerDescToSwift(&desc->finalConv); - SWGlobalPoolingResidualBlockDesc swDesc = - createSWGlobalPoolingResidualBlockDesc(preBN, - preActivationKind, - regularConv, - gpoolConv, - gpoolBN, - gpoolActivationKind, - gpoolToBiasMul, - midBN, - midActivationKind, - finalConv); + return createSWGlobalPoolingResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + gpoolConv, + gpoolBN, + gpoolActivationKind, + gpoolToBiasMul, + midBN, + midActivationKind, + finalConv); +} + +// Forward declaration for mutual recursion +swift::Array residualBlocksToSwift(const vector>& blocks); + +/// Convert a nested bottleneck residual block description from C++ to Swift +SWNestedBottleneckResidualBlockDesc nestedBottleneckResidualBlockDescToSwift(const NestedBottleneckResidualBlockDesc* desc) { + SWBatchNormLayerDesc preBN = batchNormLayerDescToSwift(&desc->preBN); + ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc preConv = convLayerDescToSwift(&desc->preConv); + auto swBlocks = residualBlocksToSwift(desc->blocks); + SWBatchNormLayerDesc postBN = batchNormLayerDescToSwift(&desc->postBN); + ActivationKind postActivationKind = activationLayerDescToSwift(&desc->postActivation); + SWConvLayerDesc postConv = convLayerDescToSwift(&desc->postConv); - return swDesc; + return createSWNestedBottleneckResidualBlockDesc( + preBN, + preActivationKind, + preConv, + swBlocks, + postBN, + postActivationKind, + postConv); } /// Convert residual blocks from C++ to Swift -/// - Parameters: -/// - blocks: Residual blocks -/// - swBlocks: A pointer to an array of BlockDescriptor -swift::Array MetalProcess::residualBlocksToSwift(const vector>& blocks) { - +swift::Array residualBlocksToSwift(const vector>& blocks) { auto builder = 
createBlockDescriptorBuilder(); - for (int i = 0; i < blocks.size(); i++) { + for(size_t i = 0; i < blocks.size(); i++) { + void* blockDesc = blocks[i].second.get(); - void * blockDesc = blocks[i].second.get(); - - if (blocks[i].first == GLOBAL_POOLING_BLOCK_KIND) { + if(blocks[i].first == GLOBAL_POOLING_BLOCK_KIND) { BlockDescriptor descriptor = globalPoolingResidualBlockDescToSwift((GlobalPoolingResidualBlockDesc*)blockDesc); builder.enque(descriptor); - } else if (blocks[i].first == NESTED_BOTTLENECK_BLOCK_KIND) { + } else if(blocks[i].first == NESTED_BOTTLENECK_BLOCK_KIND) { BlockDescriptor descriptor = nestedBottleneckResidualBlockDescToSwift((NestedBottleneckResidualBlockDesc*)blockDesc); builder.enque(descriptor); } else { @@ -149,35 +242,8 @@ swift::Array MetalProcess::residualBlocksToSwift(const vector

preBN); - ActivationKind preActivationKind = activationLayerDescToSwift(&desc->preActivation); - SWConvLayerDesc preConv = convLayerDescToSwift(&desc->preConv); - auto swBlocks = residualBlocksToSwift(desc->blocks); - SWBatchNormLayerDesc postBN = batchNormLayerDescToSwift(&desc->postBN); - ActivationKind postActivationKind = activationLayerDescToSwift(&desc->postActivation); - SWConvLayerDesc postConv = convLayerDescToSwift(&desc->postConv); - - SWNestedBottleneckResidualBlockDesc swDesc = - createSWNestedBottleneckResidualBlockDesc(preBN, - preActivationKind, - preConv, - swBlocks, - postBN, - postActivationKind, - postConv); - - return swDesc; -} - /// Convert a SGF metadata encoder description from C++ to Swift -/// - Parameter desc: A SGF metadata encoder description -/// - Returns: The SGF metadata encoder description converted to SWSGFMetadataEncoderDesc -swift::Optional MetalProcess::sGFMetadataEncoderDescToSwift(const SGFMetadataEncoderDesc * desc) { - +swift::Optional sGFMetadataEncoderDescToSwift(const SGFMetadataEncoderDesc* desc) { SWMatMulLayerDesc mul1 = matMulLayerDescToSwift(&desc->mul1); SWMatBiasLayerDesc bias1 = matBiasLayerDescToSwift(&desc->bias1); ActivationKind act1 = activationLayerDescToSwift(&desc->act1); @@ -186,24 +252,20 @@ swift::Optional MetalProcess::sGFMetadataEncoderDescTo ActivationKind act2 = activationLayerDescToSwift(&desc->act2); SWMatMulLayerDesc mul3 = matMulLayerDescToSwift(&desc->mul3); - auto swSGFMetadataEncoderDesc = createSWSGFMetadataEncoderDesc(desc->metaEncoderVersion, - desc->numInputMetaChannels, - mul1, - bias1, - act1, - mul2, - bias2, - act2, - mul3); - - return swSGFMetadataEncoderDesc; + return createSWSGFMetadataEncoderDesc( + desc->metaEncoderVersion, + desc->numInputMetaChannels, + mul1, + bias1, + act1, + mul2, + bias2, + act2, + mul3); } /// Convert a trunk description from C++ to Swift -/// - Parameter trunk: A trunk description -/// - Returns: The trunk description converted to SWTrunkDesc 
-SWTrunkDesc MetalProcess::trunkDescToSwift(const TrunkDesc * trunk) { - +SWTrunkDesc trunkDescToSwift(const TrunkDesc* trunk) { SWConvLayerDesc initialConv = convLayerDescToSwift(&trunk->initialConv); SWMatMulLayerDesc initialMatMul = matMulLayerDescToSwift(&trunk->initialMatMul); auto sgfMetadataEncoder = sGFMetadataEncoderDescToSwift(&trunk->sgfMetadataEncoder); @@ -211,26 +273,22 @@ SWTrunkDesc MetalProcess::trunkDescToSwift(const TrunkDesc * trunk) { SWBatchNormLayerDesc trunkTipBN = batchNormLayerDescToSwift(&trunk->trunkTipBN); ActivationKind trunkTipActivation = activationLayerDescToSwift(&trunk->trunkTipActivation); - SWTrunkDesc swTrunkDesc = createSWTrunkDesc(trunk->modelVersion, - trunk->trunkNumChannels, - trunk->midNumChannels, - trunk->regularNumChannels, - trunk->gpoolNumChannels, - initialConv, - initialMatMul, - sgfMetadataEncoder, - swBlocks, - trunkTipBN, - trunkTipActivation); - - return swTrunkDesc; + return createSWTrunkDesc( + trunk->modelVersion, + trunk->trunkNumChannels, + trunk->midNumChannels, + trunk->regularNumChannels, + trunk->gpoolNumChannels, + initialConv, + initialMatMul, + sgfMetadataEncoder, + swBlocks, + trunkTipBN, + trunkTipActivation); } /// Convert a policy head description from C++ to Swift -/// - Parameter policyHead: A policy head description -/// - Returns: The policy head description converted to SWPolicyHeadDesc -SWPolicyHeadDesc MetalProcess::policyHeadDescToSwift(const PolicyHeadDesc * policyHead) { - +SWPolicyHeadDesc policyHeadDescToSwift(const PolicyHeadDesc* policyHead) { SWConvLayerDesc p1Conv = convLayerDescToSwift(&policyHead->p1Conv); SWConvLayerDesc g1Conv = convLayerDescToSwift(&policyHead->g1Conv); SWBatchNormLayerDesc g1BN = batchNormLayerDescToSwift(&policyHead->g1BN); @@ -244,38 +302,24 @@ SWPolicyHeadDesc MetalProcess::policyHeadDescToSwift(const PolicyHeadDesc * poli ActivationKind passActivation = activationLayerDescToSwift(&policyHead->passActivation); SWMatMulLayerDesc gpoolToPassMul2 = 
matMulLayerDescToSwift(&policyHead->gpoolToPassMul2); - SWPolicyHeadDesc swPolicyHead = createSWPolicyHeadDesc(policyHead->modelVersion, - p1Conv, - g1Conv, - g1BN, - g1Activation, - gpoolToBiasMul, - p1BN, - p1Activation, - p2Conv, - gpoolToPassMul, - gpoolToPassBias, - passActivation, - gpoolToPassMul2); - - return swPolicyHead; -} - -/// Convert a matrix bias layer description from C++ to Swift -/// - Parameter desc: A matrix bias layer description -/// - Returns: The matrix bias layer description converted to SWMatBiasLayerDesc -SWMatBiasLayerDesc MetalProcess::matBiasLayerDescToSwift(const MatBiasLayerDesc * desc) { - - SWMatBiasLayerDesc swDesc = createSWMatBiasLayerDesc(desc->numChannels, (float*)desc->weights.data()); - - return swDesc; + return createSWPolicyHeadDesc( + policyHead->modelVersion, + p1Conv, + g1Conv, + g1BN, + g1Activation, + gpoolToBiasMul, + p1BN, + p1Activation, + p2Conv, + gpoolToPassMul, + gpoolToPassBias, + passActivation, + gpoolToPassMul2); } /// Convert a value head description from C++ to Swift -/// - Parameter valueHead: A value head description -/// - Returns: The value head description converted to SWValueHeadDesc -SWValueHeadDesc MetalProcess::valueHeadDescToSwift(const ValueHeadDesc * valueHead) { - +SWValueHeadDesc valueHeadDescToSwift(const ValueHeadDesc* valueHead) { SWConvLayerDesc v1Conv = convLayerDescToSwift(&valueHead->v1Conv); SWBatchNormLayerDesc v1BN = batchNormLayerDescToSwift(&valueHead->v1BN); ActivationKind v1Activation = activationLayerDescToSwift(&valueHead->v1Activation); @@ -288,136 +332,90 @@ SWValueHeadDesc MetalProcess::valueHeadDescToSwift(const ValueHeadDesc * valueHe SWMatBiasLayerDesc sv3Bias = matBiasLayerDescToSwift(&valueHead->sv3Bias); SWConvLayerDesc vOwnershipConv = convLayerDescToSwift(&valueHead->vOwnershipConv); - SWValueHeadDesc swDesc = createSWValueHeadDesc(valueHead->modelVersion, - v1Conv, - v1BN, - v1Activation, - v2Mul, - v2Bias, - v2Activation, - v3Mul, - v3Bias, - sv3Mul, - sv3Bias, 
- vOwnershipConv); - - return swDesc; -} - -SWModelDesc MetalProcess::modelDescToSwift(const ModelDesc* modelDesc) { - return createSWModelDesc(modelDesc->modelVersion, - swift::String(modelDesc->name), - modelDesc->numInputChannels, - modelDesc->numInputGlobalChannels, - modelDesc->numInputMetaChannels, - modelDesc->numValueChannels, - modelDesc->numScoreValueChannels, - modelDesc->numOwnershipChannels, - trunkDescToSwift(&modelDesc->trunk), - policyHeadDescToSwift(&modelDesc->policyHead), - valueHeadDescToSwift(&modelDesc->valueHead)); -} - -//--------------------------------------------------------------------------------------------------------- - -/** - * @brief This function initializes the global state of the NeuralNet class upon program startup. - * This function should be called only once upon program startup. It ensures that the global state - * of the NeuralNet class is properly initialized, enabling it to function correctly throughout - * the lifetime of the program. - * Note that this function does not take any input parameters or return any values. 
- */ + return createSWValueHeadDesc( + valueHead->modelVersion, + v1Conv, + v1BN, + v1Activation, + v2Mul, + v2Bias, + v2Activation, + v3Mul, + v3Bias, + sv3Mul, + sv3Bias, + vOwnershipConv); +} + +/// Convert a model description from C++ to Swift +SWModelDesc modelDescToSwift(const ModelDesc* modelDesc) { + return createSWModelDesc( + modelDesc->modelVersion, + swift::String(modelDesc->name), + modelDesc->numInputChannels, + modelDesc->numInputGlobalChannels, + modelDesc->numInputMetaChannels, + modelDesc->numValueChannels, + modelDesc->numScoreValueChannels, + modelDesc->numOwnershipChannels, + modelDesc->numPolicyChannels, + trunkDescToSwift(&modelDesc->trunk), + policyHeadDescToSwift(&modelDesc->policyHead), + valueHeadDescToSwift(&modelDesc->valueHead)); +} + +} // namespace MetalProcess + +//------------------------------------------------------------------------------ +// LoadedModel implementation +//------------------------------------------------------------------------------ + +LoadedModel::LoadedModel(const string& fileName, const string& expectedSha256) { + modelPath = fileName; + ModelDesc::loadFromFileMaybeGZipped(fileName, modelDesc, expectedSha256); +} + +//------------------------------------------------------------------------------ +// NeuralNet namespace - Global functions +//------------------------------------------------------------------------------ + void NeuralNet::globalInitialize() { - // Do nothing. + // No global initialization needed for Metal backend } -/** - * @brief This function cleans up the global state of the NeuralNet class at program termination. - * This function should be called once at program termination. It ensures that the global state of - * the NeuralNet class is properly cleaned up, freeing any resources that were allocated during the - * lifetime of the program. - * Note that this function does not take any input parameters or return any values. - */ void NeuralNet::globalCleanup() { - // Do nothing. 
-} - -/** - * @brief Loads a neural network model from a file. - * This function creates a LoadedModel object by loading a neural network model from a file specified by - * the `file` parameter and expected SHA-256 hash specified by the `expectedSha256` parameter. The LoadedModel - * object is returned as a pointer. - * @param file The name of the file containing the neural network model. - * @param expectedSha256 The expected SHA-256 hash of the model file. - * @return A pointer to the LoadedModel object created by loading the model file. - */ + // No cleanup needed - temp files are deleted immediately after loading +} + LoadedModel* NeuralNet::loadModelFile(const string& file, const string& expectedSha256) { LoadedModel* loadedModel = new LoadedModel(file, expectedSha256); return loadedModel; } -/** - * @brief Frees memory used by a LoadedModel object. - * This function deallocates memory used by a LoadedModel object specified by the `loadedModel` parameter. - * @param loadedModel A pointer to the LoadedModel object to deallocate memory for. - */ void NeuralNet::freeLoadedModel(LoadedModel* loadedModel) { delete loadedModel; } -/** - * @brief Retrieves the model description associated with the loaded model. - * - * This function accesses the model description from a given LoadedModel instance. - * It returns a constant reference to the ModelDesc, which contains details - * about the structure and parameters of the neural network model. - * - * @param loadedModel Pointer to the LoadedModel instance from which to retrieve - * the model description. This should not be null. - * @return const ModelDesc& A constant reference to the model description of - * the loaded model. 
- */ const ModelDesc& NeuralNet::getModelDesc(const LoadedModel* loadedModel) { return loadedModel->modelDesc; } +//------------------------------------------------------------------------------ +// ComputeContext implementation //------------------------------------------------------------------------------ ComputeContext::ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode): -metalComputeContext(createMetalComputeContext(nnX, nnY)) { +metalContext(createMetalComputeContext(nnX, nnY, useFP16Mode != enabled_t::False)) { this->useFP16Mode = useFP16Mode; - - SWEnable swUseFP16Mode = - (useFP16Mode == enabled_t::False) ? SWEnable::False() : - (useFP16Mode == enabled_t::True) ? SWEnable::True() : - SWEnable::Auto(); - - SWEnable swUseNHWCMode = - (useNHWCMode == enabled_t::False) ? SWEnable::False() : - (useNHWCMode == enabled_t::True) ? SWEnable::True() : - SWEnable::Auto(); + this->nnXLen = nnX; + this->nnYLen = nnY; + // Metal backend only supports NCHW layout (MPSGraph native format) + (void)useNHWCMode; } ComputeContext::~ComputeContext() { } -/** - * @brief Creates a ComputeContext object for computing neural network operations. - * This function creates a ComputeContext object by setting configuration settings for neural network computations, - * such as whether to use half-precision floating-point (FP16) mode and whether to use the NHWC format for input - * tensors. The ComputeContext object is returned as a pointer. - * @param gpuIdxs (Unused) A vector of GPU indices to use for computations. - * @param logger (Unused) A pointer to a Logger object to use for logging messages. - * @param nnXLen The width of the input tensor. - * @param nnYLen The height of the input tensor. - * @param openCLTunerFile (Unused) The name of a file containing OpenCL tuning parameters. - * @param homeDataDirOverride (Unused) A directory to use for storing data. 
- * @param openCLReTunePerBoardSize (Unused) Whether to re-tune OpenCL parameters for different board sizes. - * @param useFP16Mode Whether to use half-precision floating-point (FP16) mode for computations. - * @param useNHWCMode Whether to use the NHWC format for input tensors. - * @param loadedModel (Unused) A pointer to a LoadedModel object containing a loaded neural network model. - * @return A pointer to the ComputeContext object created. - */ ComputeContext* NeuralNet::createComputeContext( const vector& gpuIdxs, Logger* logger, @@ -440,29 +438,148 @@ ComputeContext* NeuralNet::createComputeContext( return new ComputeContext(nnXLen, nnYLen, useFP16Mode, useNHWCMode); } -/** - * @brief Frees memory used by a ComputeContext object. - * This function deallocates memory used by a ComputeContext object specified by the `computeContext` parameter. - * @param computeContext A pointer to the ComputeContext object to deallocate memory for. - */ void NeuralNet::freeComputeContext(ComputeContext* computeContext) { delete computeContext; } -//-------------------------------------------------------------- +//------------------------------------------------------------------------------ +// ComputeHandle implementation +//------------------------------------------------------------------------------ + +static mutex computeHandleMutex; + +// Helper function to convert model and create hybrid compute handle +// This is needed because Swift Optional doesn't support assignment in C++ +static swift::Optional convertAndCreateHybridHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int maxBatchSize, + int serverThreadIdx +) { + auto metalContext = context->metalContext; + int nnXLen = metalContext.getNnXLen(); + int nnYLen = metalContext.getNnYLen(); + bool useFP16 = (context->useFP16Mode != enabled_t::False); + bool optimizeMask = requireExactNNLen; + + // Convert model to CoreML format in temp directory + // The Swift side will 
delete the temp file after loading + string coremlModelPath = CoreMLConversion::convertModelToTemp( + loadedModel->modelPath, + nnXLen, + nnYLen, + useFP16, + optimizeMask, + maxBatchSize, + serverThreadIdx + ); + + // Convert model descriptor to Swift format for MPSGraph path + SWModelDesc swModelDesc = MetalProcess::modelDescToSwift(&loadedModel->modelDesc); + + // Create hybrid compute handle (CoreML on CPU+ANE, MPSGraph on GPU) + return createHybridComputeHandle( + swift::String(coremlModelPath), + swModelDesc, + serverThreadIdx, + requireExactNNLen, + loadedModel->modelDesc.numInputChannels, + loadedModel->modelDesc.numInputGlobalChannels, + loadedModel->modelDesc.numInputMetaChannels, + loadedModel->modelDesc.numPolicyChannels, + loadedModel->modelDesc.numValueChannels, + loadedModel->modelDesc.numScoreValueChannels, + loadedModel->modelDesc.numOwnershipChannels, + metalContext + ); +} + +// Helper function to create hybrid handle if FP16 mode with sufficient batch size, otherwise returns none +static swift::Optional createHybridHandleIfNeeded( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int maxBatchSize, + int serverThreadIdx +) { + if(context->useFP16Mode == enabled_t::False) { + // FP32 mode - don't create hybrid handle + return swift::Optional::none(); + } + + // Hybrid mode splits batches: CoreML takes max(1, ...), MPSGraph takes remainder + // Minimum samples for meaningful split = 1 (CoreML) + 1 (MPSGraph) = 2 + // If batch can't be split, prefer MPSGraph-only for stability + if(maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH) { + return swift::Optional::none(); + } -ComputeHandle::ComputeHandle(ComputeContext* context, - const LoadedModel* loadedModel, - bool inputsUseNHWC, - int gpuIdx, - int serverThreadIdx): -metalhandle(maybeCreateMetalComputeHandle((gpuIdx < 100), - serverThreadIdx, - MetalProcess::modelDescToSwift(&loadedModel->modelDesc), - context->metalComputeContext)) { + // FP16 mode with 
sufficient batch size: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) + return convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); +} + +// Helper function to create MPSGraph-only handle when needed +// Used when: (1) useFP16=false to avoid slow FP32 CoreML, or (2) batch too small for hybrid split +static swift::Optional createMPSGraphHandleIfNeeded( + ComputeContext* context, + const LoadedModel* loadedModel, + bool requireExactNNLen, + int maxBatchSize, + int serverThreadIdx +) { + // Use MPSGraph-only when: + // 1. FP32 mode (CoreML FP32 on CPU+ANE is slow), OR + // 2. Batch too small to split (hybrid requires minCoreML + minMPSGraph samples) + bool batchTooSmallForHybrid = maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH; + + if(context->useFP16Mode != enabled_t::False && !batchTooSmallForHybrid) { + // FP16 mode with sufficient batch - hybrid handle will be created instead + return swift::Optional::none(); + } + + // Log reason for MPSGraph-only mode + if(batchTooSmallForHybrid) { + cerr << "Metal backend " << serverThreadIdx << ": Batch size " << maxBatchSize + << " too small for hybrid split - using MPSGraph GPU-only" << endl; + } else { + cerr << "Metal backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; + } + + // Convert model descriptor to Swift format for MPSGraph path + // Note: No CoreML conversion needed - MPSGraph reads weights directly + SWModelDesc swModelDesc = MetalProcess::modelDescToSwift(&loadedModel->modelDesc); + + // Create MPSGraph-only handle (GPU only) + return createMPSGraphOnlyHandle( + swModelDesc, + serverThreadIdx, + requireExactNNLen, + context->metalContext + ); +} + +ComputeHandle::ComputeHandle( + ComputeContext* context, + const LoadedModel* loadedModel, + bool inputsUseNHWC, + int gpuIdx, + int serverThreadIdx, + bool requireExactNNLen, + int maxBatchSize): +hybridHandle(createHybridHandleIfNeeded(context, 
loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)), +mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { + bool hasHybrid = static_cast(hybridHandle); + bool hasMPSGraph = static_cast(mpsGraphOnlyHandle); + if(hasHybrid && hasMPSGraph) { + throw runtime_error("Metal backend: Logic error - both hybridHandle and mpsGraphOnlyHandle are valid"); + } + if(!hasHybrid && !hasMPSGraph) { + throw runtime_error("Metal backend: Failed to create compute handle - both CoreML and MPSGraph initialization failed (check logs above for details)"); + } const ModelDesc* modelDesc = &loadedModel->modelDesc; - auto metalContext = context->metalComputeContext; + auto metalContext = context->metalContext; nnXLen = metalContext.getNnXLen(); nnYLen = metalContext.getNnYLen(); @@ -470,34 +587,13 @@ metalhandle(maybeCreateMetalComputeHandle((gpuIdx < 100), version = modelDesc->modelVersion; metaEncoderVersion = modelDesc->metaEncoderVersion; this->inputsUseNHWC = inputsUseNHWC; - - /* Use FP16 mode if the model supports it and the user has not explicitly - * disabled it. */ + this->requireExactNNLen = requireExactNNLen; useFP16 = (context->useFP16Mode != enabled_t::False); - - (void)serverThreadIdx; } ComputeHandle::~ComputeHandle() { } -static mutex computeHandleMutex; - -/** - * @brief Create a new ComputeHandle object for performing neural network computations. - * This function creates a new ComputeHandle object for performing neural network computations, - * using the specified parameters and settings. The object is allocated on the heap using the - * 'new' operator and returned as a pointer. - * @param context A pointer to the ComputeContext object to use for computation. - * @param loadedModel A pointer to the LoadedModel object containing the neural network model to use. - * @param logger A pointer to the Logger object to use for logging messages. 
- * @param maxBatchSize The maximum batch size to use for computation. - * @param requireExactNNLen Whether the neural network length must match the input data length exactly. - * @param inputsUseNHWC Whether the input data uses NHWC format. - * @param gpuIdxForThisThread The index of the GPU to use for computation. - * @param serverThreadIdx The index of the server thread to use for computation. - * @return A pointer to the newly-created ComputeHandle object. - */ ComputeHandle* NeuralNet::createComputeHandle( ComputeContext* context, const LoadedModel* loadedModel, @@ -509,63 +605,38 @@ ComputeHandle* NeuralNet::createComputeHandle( int serverThreadIdx) { (void)logger; - (void)maxBatchSize; - // Current implementation always tolerates excess nn len - (void)requireExactNNLen; - // Transfer the default GPU index into physical GPU index 0 int gpuIdx = (gpuIdxForThisThread == -1) ? 0 : gpuIdxForThisThread; ComputeHandle* handle = nullptr; { lock_guard lock(computeHandleMutex); - handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx); + handle = new ComputeHandle(context, loadedModel, inputsUseNHWC, gpuIdx, serverThreadIdx, requireExactNNLen, maxBatchSize); } return handle; } -/** - * @brief Free the memory used by a ComputeHandle object. - * This function frees the memory used by the specified ComputeHandle object, which was - * previously allocated on the heap using the 'new' operator. - * @param handle A pointer to the ComputeHandle object to free. - */ void NeuralNet::freeComputeHandle(ComputeHandle* handle) { delete handle; } -/** - * @brief Check whether a ComputeHandle object is using 16-bit floating-point precision. - * This function checks whether the specified ComputeHandle object is using 16-bit floating-point - * precision for computation, and returns a boolean value indicating the result. - * @param handle A pointer to the ComputeHandle object to check. 
- * @return True if the ComputeHandle object is using 16-bit floating-point precision, false otherwise. - */ bool NeuralNet::isUsingFP16(const ComputeHandle* handle) { return handle->useFP16; } +//------------------------------------------------------------------------------ +// Device information //------------------------------------------------------------------------------ -/** - * @brief Print information about the available devices. - */ void NeuralNet::printDevices() { printMetalDevices(); } -//-------------------------------------------------------------- +//------------------------------------------------------------------------------ +// InputBuffers implementation +//------------------------------------------------------------------------------ -/** - * @brief Construct a new InputBuffers object for storing input data for neural network computation. - * This constructor initializes a new InputBuffers object for storing input data for neural network - * computation, based on the specified parameters and settings. - * @param loadedModel A pointer to the LoadedModel object containing the neural network model to use. - * @param maxBatchSz The maximum batch size to use for computation. - * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. 
- */ InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int nnXLen, int nnYLen) { const ModelDesc& m = loadedModel->modelDesc; @@ -587,6 +658,7 @@ InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int n singleOwnershipResultElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; singleOwnerMapElts = (size_t)m.numOwnershipChannels * nnXLen * nnYLen; singleScoreValuesResultElts = (size_t)m.numScoreValueChannels; + singleMaskElts = (size_t)nnXLen * nnYLen; assert(NNModelVersion::getNumSpatialFeatures(m.modelVersion) == m.numInputChannels); assert(NNModelVersion::getNumGlobalFeatures(m.modelVersion) == m.numInputGlobalChannels); @@ -603,10 +675,10 @@ InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int n ownershipResultBufferElts = (size_t)maxBatchSize * singleOwnershipResultElts; ownerMapBufferElts = (size_t)maxBatchSz * singleOwnerMapElts; scoreValuesResultBufferElts = (size_t)maxBatchSize * singleScoreValuesResultElts; + userInputMaskBufferElts = (size_t)maxBatchSize * singleMaskElts; rowSpatialBuffer = new float[rowSpatialBufferElts]; userInputBuffer = new float[userInputBufferElts]; - // Zero out the input buffer for arbitrary board sizes memset(&userInputBuffer[0], 0, userInputBufferElts * sizeof(userInputBuffer[0])); userInputGlobalBuffer = new float[userInputGlobalBufferElts]; @@ -618,13 +690,10 @@ InputBuffers::InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int n ownershipResults = new float[ownershipResultBufferElts]; ownerMapBuffer = new float[ownerMapBufferElts]; scoreValuesResults = new float[scoreValuesResultBufferElts]; + userInputMaskBuffer = new float[userInputMaskBufferElts]; + memset(&userInputMaskBuffer[0], 0, userInputMaskBufferElts * sizeof(userInputMaskBuffer[0])); } -/** - * @brief Destroy the InputBuffers object and free all associated memory. 
- * This destructor destroys the InputBuffers object and frees all memory associated with it, - * including all input and output buffers used for neural network computation. - */ InputBuffers::~InputBuffers() { delete[] rowSpatialBuffer; delete[] userInputBuffer; @@ -637,48 +706,25 @@ InputBuffers::~InputBuffers() { delete[] ownershipResults; delete[] ownerMapBuffer; delete[] scoreValuesResults; + delete[] userInputMaskBuffer; } -/** - * @brief Create a new InputBuffers object for storing input data for neural network computation. - * This function creates a new InputBuffers object for storing input data for neural network computation, - * using the specified parameters and settings. The object is allocated on the heap using the 'new' operator - * and returned as a pointer. - * @param loadedModel A pointer to the LoadedModel object containing the neural network model to use. - * @param maxBatchSize The maximum batch size to use for computation. - * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. - * @return A pointer to the newly-created InputBuffers object. - */ InputBuffers* NeuralNet::createInputBuffers(const LoadedModel* loadedModel, int maxBatchSize, int nnXLen, int nnYLen) { return new InputBuffers(loadedModel, maxBatchSize, nnXLen, nnYLen); } -/** - * @brief Free the memory used by an InputBuffers object. - * This function frees the memory used by the specified InputBuffers object, which was - * previously allocated on the heap using the 'new' operator. - * @param inputBuffers A pointer to the InputBuffers object to free. 
- */ void NeuralNet::freeInputBuffers(InputBuffers* inputBuffers) { delete inputBuffers; } -//-------------------------------------------------------------- +//------------------------------------------------------------------------------ +// MetalProcess namespace - Helper functions +//------------------------------------------------------------------------------ void MetalProcess::copyRowData(float* dest, const float* src, size_t numElements) { copy(src, src + numElements, dest); } -/** - * @brief Convert input data from NHWC format to NCHW format in-place if necessary. - * - * @param rowSpatialInput Pointer to the input data (single batch element assumed). - * @param C Number of channels. - * @param H Height. - * @param W Width. - * @param inputsUseNHWC Flag indicating if the input data is currently in NHWC format. - */ void MetalProcess::convertNCHW( float* rowSpatialInput, const int C, @@ -766,6 +812,11 @@ void MetalProcess::processRowData(size_t row, ComputeHandle* gpuHandle, InputBuf nnYLen, nnXLen, gpuHandle->inputsUseNHWC); + + // Copy first channel of spatial input (mask) to dedicated mask buffer + // After NCHW conversion, the first nnXLen*nnYLen elements are the mask channel + float* rowMaskInput = &inputBuffers->userInputMaskBuffer[inputBuffers->singleMaskElts * row]; + copy(rowSpatialInput, rowSpatialInput + inputBuffers->singleMaskElts, rowMaskInput); } float MetalProcess::policyOptimismCalc(const double policyOptimism, const float p, const float pOpt) { @@ -782,7 +833,7 @@ void MetalProcess::processOptimism( float* targetBuffer = &buffers.policyProbsBuffer[row * singlePolicyResultElts]; float* policyOutputBuf = &buffers.policyResults[row * singlePolicyResultElts * buffers.policyResultChannels]; - for(auto i = 0; i < singlePolicyResultElts; ++i) { + for(size_t i = 0; i < singlePolicyResultElts; ++i) { const float p = policyOutputBuf[i]; const float pOpt = policyOutputBuf[i + singlePolicyResultElts]; targetBuffer[i] = 
MetalProcess::policyOptimismCalc(policyOptimism, p, pOpt); @@ -801,7 +852,6 @@ void MetalProcess::processPolicy( size_t row) { auto& buffers = *inputBuffers; float* targetBuffer = &buffers.policyResults[row * buffers.singlePolicyResultElts * buffers.policyResultChannels]; - const auto symmetry = inputBuf->symmetry; const auto policyOptimism = inputBuf->policyOptimism; if(buffers.policyResultChannels == 1) { @@ -813,7 +863,7 @@ void MetalProcess::processPolicy( } SymmetryHelpers::copyOutputsWithSymmetry( - targetBuffer, currentOutput->policyProbs, 1, gpuHandle->nnYLen, gpuHandle->nnXLen, symmetry); + targetBuffer, currentOutput->policyProbs, 1, gpuHandle->nnYLen, gpuHandle->nnXLen, inputBuf->symmetry); } void MetalProcess::processValue( @@ -839,7 +889,6 @@ void MetalProcess::processOwnership( const size_t singleOwnershipResultElts = inputBuffers->singleOwnershipResultElts; const size_t ownershipOutputBufOffset = row * singleOwnershipResultElts; - // Copy ownership results with symmetry if available if(currentOutput->whiteOwnerMap != nullptr) { const float* ownershipOutputBuf = &inputBuffers->ownershipResults[ownershipOutputBufOffset]; SymmetryHelpers::copyOutputsWithSymmetry( @@ -890,7 +939,6 @@ void MetalProcess::processScoreValues( size_t numScoreValueChannels = inputBuffers->singleScoreValuesResultElts; assert(numScoreValueChannels == 1); currentOutput->whiteScoreMean = currentScoreValueData[0]; - //Version 3 neural nets don't have any second moment currentOutput, implicitly already folding it in, so we just use the mean squared currentOutput->whiteScoreMeanSq = currentOutput->whiteScoreMean * currentOutput->whiteScoreMean; currentOutput->whiteLead = currentOutput->whiteScoreMean; currentOutput->varTimeLeft = 0; @@ -914,16 +962,6 @@ void MetalProcess::processRow( MetalProcess::processScoreValues(inputBuffers, currentOutput, gpuHandle->version, row); } -/** - * @brief Compute the neural network output using Metal API and the specified input data and GPU handle. 
- * This function computes the neural network output using the Metal API and the specified input data and ComputeHandle - * object for GPU acceleration. The computed output is stored in the specified vector of NNOutput pointers. - * @param gpuHandle A pointer to the ComputeHandle object to use for GPU computation. - * @param inputBuffers A pointer to the InputBuffers object containing the input data for computation. - * @param numBatchEltsFilled The number of batch elements filled in the input buffer. - * @param inputBufs An array of pointers to NNResultBuf objects containing the neural network input data. - * @param outputs A vector of NNOutput pointers to store the computed output. - */ void MetalProcess::getMetalOutput( ComputeHandle* gpuHandle, InputBuffers* inputBuffers, @@ -935,47 +973,57 @@ void MetalProcess::getMetalOutput( int batchSize = numBatchEltsFilled; assert(batchSize <= inputBuffers->maxBatchSize); - assert((NNModelVersion::getNumSpatialFeatures(gpuHandle->version) * gpuHandle->nnXLen * gpuHandle->nnYLen) <= inputBuffers->singleInputElts); - assert(NNModelVersion::getNumGlobalFeatures(gpuHandle->version) == inputBuffers->singleInputGlobalElts); + assert((NNModelVersion::getNumSpatialFeatures(gpuHandle->version) * gpuHandle->nnXLen * gpuHandle->nnYLen) <= (int)inputBuffers->singleInputElts); + assert(NNModelVersion::getNumGlobalFeatures(gpuHandle->version) == (int)inputBuffers->singleInputGlobalElts); if(gpuHandle->metaEncoderVersion > 0) { - assert(SGFMetadata::METADATA_INPUT_NUM_CHANNELS == inputBuffers->singleInputMetaElts); + assert(SGFMetadata::METADATA_INPUT_NUM_CHANNELS == (int)inputBuffers->singleInputMetaElts); } assert(inputBuffers->singleValueResultElts == 3); - for(size_t row = 0; row < batchSize; row++) { + for(int row = 0; row < batchSize; row++) { MetalProcess::processRowData(row, gpuHandle, inputBuffers, inputBufs); } - auto metalHandle = gpuHandle->metalhandle; - assert(metalHandle); - - 
metalHandle.get().apply(inputBuffers->userInputBuffer, - inputBuffers->userInputGlobalBuffer, - inputBuffers->userInputMetaBuffer, - inputBuffers->policyResults, - inputBuffers->policyPassResults, - inputBuffers->valueResults, - inputBuffers->scoreValuesResults, - inputBuffers->ownershipResults, - batchSize); + // Dispatch to appropriate handle based on mode + if(gpuHandle->hybridHandle) { + // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) + // Mask buffer has correct stride (singleMaskElts = H*W per batch element) + // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) + gpuHandle->hybridHandle.get().apply( + inputBuffers->userInputBuffer, + inputBuffers->userInputGlobalBuffer, + inputBuffers->userInputMetaBuffer, + inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride + inputBuffers->policyResults, + inputBuffers->policyPassResults, + inputBuffers->valueResults, + inputBuffers->scoreValuesResults, + inputBuffers->ownershipResults, + batchSize); + } else if(gpuHandle->mpsGraphOnlyHandle) { + // FP32 mode: Use MPSGraph only (GPU-only) + // Mask is extracted internally from channel 0 of spatial input via strided reads + gpuHandle->mpsGraphOnlyHandle.get().apply( + inputBuffers->userInputBuffer, + inputBuffers->userInputGlobalBuffer, + inputBuffers->userInputMetaBuffer, + inputBuffers->policyResults, + inputBuffers->policyPassResults, + inputBuffers->valueResults, + inputBuffers->scoreValuesResults, + inputBuffers->ownershipResults, + batchSize); + } else { + throw runtime_error("Metal backend: No valid compute handle available"); + } - for(size_t row = 0; row < batchSize; row++) { + for(int row = 0; row < batchSize; row++) { MetalProcess::processRow(row, gpuHandle, inputBuffers, inputBufs, outputs); } } -/** - * @brief Compute the neural network output using the specified input data and GPU handle. 
- * This function computes the neural network output using the specified input data and ComputeHandle object - * for GPU acceleration. The computed output is stored in the specified vector of NNOutput pointers. - * @param gpuHandle A pointer to the ComputeHandle object to use for GPU computation. - * @param inputBuffers A pointer to the InputBuffers object containing the input data for computation. - * @param numBatchEltsFilled The number of batch elements filled in the input buffer. - * @param inputBufs An array of pointers to NNResultBuf objects containing the neural network input data. - * @param outputs A vector of NNOutput pointers to store the computed output. - */ void NeuralNet::getOutput( ComputeHandle* gpuHandle, InputBuffers* inputBuffers, @@ -986,41 +1034,254 @@ void NeuralNet::getOutput( MetalProcess::getMetalOutput(gpuHandle, inputBuffers, numBatchEltsFilled, inputBufs, outputs); } -bool MetalProcess::testEvaluateConv(const ConvLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - vector& outputBuffer) { +//------------------------------------------------------------------------------ +// Test functions - Metal backend uses NCHW layout (not NHWC) +//------------------------------------------------------------------------------ + +namespace MetalProcess { + +// Helper function to compute merged scale and bias from raw values +// This is needed because test descriptors are created manually without computing merged values +static void computeMergedBatchNormValues( + const BatchNormLayerDesc* desc, + vector& mergedScale, + vector& mergedBias) { + + int numChannels = desc->numChannels; + mergedScale.resize(numChannels); + mergedBias.resize(numChannels); + + // If merged values are already computed, use them + if(!desc->mergedScale.empty() && !desc->mergedBias.empty()) { + mergedScale = desc->mergedScale; + mergedBias = desc->mergedBias; + return; + } + + // Otherwise compute from raw values: mergedScale = scale / 
sqrt(variance + epsilon) + // mergedBias = bias - mergedScale * mean + // Note: Use scale/bias values from vectors if available, regardless of hasScale/hasBias flags + // This matches how desc.cpp computes merged values during model loading + for(int c = 0; c < numChannels; c++) { + float scale = c < (int)desc->scale.size() ? desc->scale[c] : 1.0f; + float bias = c < (int)desc->bias.size() ? desc->bias[c] : 0.0f; + float mean = c < (int)desc->mean.size() ? desc->mean[c] : 0.0f; + float variance = c < (int)desc->variance.size() ? desc->variance[c] : 1.0f; + float epsilon = desc->epsilon; + + mergedScale[c] = scale / sqrt(variance + epsilon); + mergedBias[c] = bias - mergedScale[c] * mean; + } +} + +// Helper to convert BatchNormLayerDesc to Swift with computed merged values +static SWBatchNormLayerDesc batchNormLayerDescToSwiftWithMerge( + const BatchNormLayerDesc* desc, + vector& mergedScaleStorage, + vector& mergedBiasStorage) { + + computeMergedBatchNormValues(desc, mergedScaleStorage, mergedBiasStorage); + + return createSWBatchNormLayerDesc( + desc->numChannels, + mergedScaleStorage.data(), + mergedBiasStorage.data()); +} + +// Helper to convert ResidualBlockDesc to Swift with computed merged values +static SWResidualBlockDesc residualBlockDescToSwiftWithMerge( + const ResidualBlockDesc* desc, + vector& mergedScalePreBN, + vector& mergedBiasPreBN, + vector& mergedScaleMidBN, + vector& mergedBiasMidBN) { + + computeMergedBatchNormValues(&desc->preBN, mergedScalePreBN, mergedBiasPreBN); + computeMergedBatchNormValues(&desc->midBN, mergedScaleMidBN, mergedBiasMidBN); + + SWBatchNormLayerDesc preBN = createSWBatchNormLayerDesc( + desc->preBN.numChannels, + mergedScalePreBN.data(), + mergedBiasPreBN.data()); + + ActivationKind preActivationKind = MetalProcess::activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc regularConv = MetalProcess::convLayerDescToSwift(&desc->regularConv); + + SWBatchNormLayerDesc midBN = createSWBatchNormLayerDesc( + 
desc->midBN.numChannels, + mergedScaleMidBN.data(), + mergedBiasMidBN.data()); + + ActivationKind midActivationKind = MetalProcess::activationLayerDescToSwift(&desc->midActivation); + SWConvLayerDesc finalConv = MetalProcess::convLayerDescToSwift(&desc->finalConv); + + return createSWResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + midBN, + midActivationKind, + finalConv); +} + +// Helper to convert GlobalPoolingResidualBlockDesc to Swift with computed merged values +static SWGlobalPoolingResidualBlockDesc globalPoolingResidualBlockDescToSwiftWithMerge( + const GlobalPoolingResidualBlockDesc* desc, + vector& mergedScalePreBN, + vector& mergedBiasPreBN, + vector& mergedScaleMidBN, + vector& mergedBiasMidBN, + vector& mergedScaleGpoolBN, + vector& mergedBiasGpoolBN) { + + computeMergedBatchNormValues(&desc->preBN, mergedScalePreBN, mergedBiasPreBN); + computeMergedBatchNormValues(&desc->gpoolBN, mergedScaleGpoolBN, mergedBiasGpoolBN); + computeMergedBatchNormValues(&desc->midBN, mergedScaleMidBN, mergedBiasMidBN); + + SWBatchNormLayerDesc preBN = createSWBatchNormLayerDesc( + desc->preBN.numChannels, + mergedScalePreBN.data(), + mergedBiasPreBN.data()); + + ActivationKind preActivationKind = MetalProcess::activationLayerDescToSwift(&desc->preActivation); + SWConvLayerDesc regularConv = MetalProcess::convLayerDescToSwift(&desc->regularConv); + SWConvLayerDesc gpoolConv = MetalProcess::convLayerDescToSwift(&desc->gpoolConv); + + SWBatchNormLayerDesc gpoolBN = createSWBatchNormLayerDesc( + desc->gpoolBN.numChannels, + mergedScaleGpoolBN.data(), + mergedBiasGpoolBN.data()); + + ActivationKind gpoolActivationKind = MetalProcess::activationLayerDescToSwift(&desc->gpoolActivation); + SWMatMulLayerDesc gpoolToBiasMul = MetalProcess::matMulLayerDescToSwift(&desc->gpoolToBiasMul); + + SWBatchNormLayerDesc midBN = createSWBatchNormLayerDesc( + desc->midBN.numChannels, + mergedScaleMidBN.data(), + mergedBiasMidBN.data()); + + ActivationKind midActivationKind = 
MetalProcess::activationLayerDescToSwift(&desc->midActivation); + SWConvLayerDesc finalConv = MetalProcess::convLayerDescToSwift(&desc->finalConv); + + return createSWGlobalPoolingResidualBlockDesc( + preBN, + preActivationKind, + regularConv, + gpoolConv, + gpoolBN, + gpoolActivationKind, + gpoolToBiasMul, + midBN, + midActivationKind, + finalConv); +} + +bool testEvaluateConv( + const ConvLayerDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + const vector& inputBuffer, + vector& outputBuffer) { + + SWConvLayerDesc swDesc = MetalProcess::convLayerDescToSwift(desc); size_t numOutputFloats = (size_t)batchSize * nnXLen * nnYLen * desc->outChannels; outputBuffer.resize(numOutputFloats); - testConvLayer(convLayerDescToSwift(desc), - nnXLen, - nnYLen, - batchSize, - (float*)inputBuffer.data(), - (float*)outputBuffer.data()); - - return true; -} - -/** - * @brief Evaluate a convolutional layer using Metal API for testing purposes. - * This function evaluates a convolutional layer using the Metal API for testing purposes. - * The input buffer and output buffer are specified as vectors of floats, and the result of the computation - * is stored in the output buffer. The function returns true if the evaluation is implemented. - * @param desc A pointer to the ConvLayerDesc object describing the convolutional layer to evaluate. - * @param batchSize The batch size to use for computation. - * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. - * @param useFP16 A boolean indicating whether to use half-precision floating point format for computation. - * @param useNHWC A boolean indicating whether to use NHWC layout for input and output buffers. - * @param inputBuffer A vector of floats containing the input buffer data. - * @param outputBuffer A vector of floats to store the computed output. - * @return true if the convolutional layer evaluation is implemented, false otherwise. 
- */ + return testConvLayer( + swDesc, + batchSize, + nnXLen, + nnYLen, + (float*)inputBuffer.data(), + outputBuffer.data()); +} + +bool testEvaluateBatchNorm( + const BatchNormLayerDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + + vector mergedScaleStorage; + vector mergedBiasStorage; + SWBatchNormLayerDesc swDesc = batchNormLayerDescToSwiftWithMerge(desc, mergedScaleStorage, mergedBiasStorage); + + size_t numOutputFloats = (size_t)batchSize * nnXLen * nnYLen * desc->numChannels; + outputBuffer.resize(numOutputFloats); + + return testBatchNormLayer( + swDesc, + batchSize, + nnXLen, + nnYLen, + (float*)inputBuffer.data(), + (float*)maskBuffer.data(), + outputBuffer.data()); +} + +bool testEvaluateResidualBlock( + const ResidualBlockDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + + vector mergedScalePreBN, mergedBiasPreBN; + vector mergedScaleMidBN, mergedBiasMidBN; + SWResidualBlockDesc swDesc = residualBlockDescToSwiftWithMerge( + desc, mergedScalePreBN, mergedBiasPreBN, mergedScaleMidBN, mergedBiasMidBN); + + size_t numOutputFloats = (size_t)batchSize * nnXLen * nnYLen * desc->preBN.numChannels; + outputBuffer.resize(numOutputFloats); + + return testResidualBlock( + swDesc, + batchSize, + nnXLen, + nnYLen, + (float*)inputBuffer.data(), + (float*)maskBuffer.data(), + outputBuffer.data()); +} + +bool testEvaluateGlobalPoolingResidualBlock( + const GlobalPoolingResidualBlockDesc* desc, + int batchSize, + int nnXLen, + int nnYLen, + const vector& inputBuffer, + const vector& maskBuffer, + vector& outputBuffer) { + + vector mergedScalePreBN, mergedBiasPreBN; + vector mergedScaleMidBN, mergedBiasMidBN; + vector mergedScaleGpoolBN, mergedBiasGpoolBN; + SWGlobalPoolingResidualBlockDesc swDesc = globalPoolingResidualBlockDescToSwiftWithMerge( + desc, mergedScalePreBN, mergedBiasPreBN, 
mergedScaleMidBN, mergedBiasMidBN, + mergedScaleGpoolBN, mergedBiasGpoolBN); + + size_t numOutputFloats = (size_t)batchSize * nnXLen * nnYLen * desc->preBN.numChannels; + outputBuffer.resize(numOutputFloats); + + return testGlobalPoolingResidualBlock( + swDesc, + batchSize, + nnXLen, + nnYLen, + (float*)inputBuffer.data(), + (float*)maskBuffer.data(), + outputBuffer.data()); +} + +} // namespace MetalProcess + bool NeuralNet::testEvaluateConv( const ConvLayerDesc* desc, int batchSize, @@ -1031,49 +1292,16 @@ bool NeuralNet::testEvaluateConv( const vector& inputBuffer, vector& outputBuffer) { + // Metal backend only supports NCHW layout + if(useNHWC) + return false; + + // useFP16 is ignored - MPSGraph tests use FP32 (void)useFP16; - (void)useNHWC; + return MetalProcess::testEvaluateConv(desc, batchSize, nnXLen, nnYLen, inputBuffer, outputBuffer); } -bool MetalProcess::testEvaluateBatchNorm(const BatchNormLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - - size_t numOutputFloats = (size_t)batchSize * nnXLen * nnYLen * desc->numChannels; - outputBuffer.resize(numOutputFloats); - - testBatchNormLayer(batchNormLayerDescToSwift(desc), - nnXLen, - nnYLen, - batchSize, - (float*)inputBuffer.data(), - (float*)maskBuffer.data(), - (float*)outputBuffer.data()); - - return true; -} - -/** - * @brief Evaluate a batch normalization layer using Metal API for testing purposes. - * This function evaluates a batch normalization layer using the Metal API for testing purposes. - * The input buffer and output buffer are specified as vectors of floats, and the result of the computation - * is stored in the output buffer. The function returns true if the evaluation is implemented. - * @param desc A pointer to the BatchNormLayerDesc object describing the batch normalization layer to evaluate. - * @param batchSize The batch size to use for computation. 
- * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. - * @param useFP16 A boolean indicating whether to use half-precision floating point format for computation. - * @param useNHWC A boolean indicating whether to use NHWC layout for input and output buffers. - * @param inputBuffer A vector of floats containing the input buffer data. - * @param maskBuffer A vector of floats containing the mask buffer data. Mask should be in 'NHW' format (no "C" channel). - * @param outputBuffer A vector of floats to store the computed output. - * @return true if the batch normalization layer evaluation is implemented, false otherwise. - */ bool NeuralNet::testEvaluateBatchNorm( const BatchNormLayerDesc* desc, int batchSize, @@ -1085,49 +1313,16 @@ bool NeuralNet::testEvaluateBatchNorm( const vector& maskBuffer, vector& outputBuffer) { + // Metal backend only supports NCHW layout + if(useNHWC) + return false; + + // useFP16 is ignored - MPSGraph tests use FP32 (void)useFP16; - (void)useNHWC; + return MetalProcess::testEvaluateBatchNorm(desc, batchSize, nnXLen, nnYLen, inputBuffer, maskBuffer, outputBuffer); } -bool MetalProcess::testEvaluateResidualBlock(const ResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - - size_t numTrunkFloats = (size_t)batchSize * nnXLen * nnYLen * desc->preBN.numChannels; - outputBuffer.resize(numTrunkFloats); - - testResidualBlock(residualBlockDescToSwift(desc), - batchSize, - nnXLen, - nnYLen, - (float*)inputBuffer.data(), - (float*)maskBuffer.data(), - (float*)outputBuffer.data()); - - return true; -} - -/** - * @brief Evaluate a residual block using Metal API for testing purposes. - * This function evaluates a residual block using the Metal API for testing purposes. 
- * The input buffer and output buffer are specified as vectors of floats, and the result of the computation - * is stored in the output buffer. The function returns true if the evaluation is implemented. - * @param desc A pointer to the ResidualBlockDesc object describing the residual block to evaluate. - * @param batchSize The batch size to use for computation. - * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. - * @param useFP16 A boolean indicating whether to use half-precision floating point format for computation. - * @param useNHWC A boolean indicating whether to use NHWC layout for input and output buffers. - * @param inputBuffer A vector of floats containing the input buffer data. - * @param maskBuffer A vector of floats containing the mask buffer data. - * @param outputBuffer A vector of floats to store the computed output. - * @return true if the residual block evaluation is implemented, false otherwise. 
- */ bool NeuralNet::testEvaluateResidualBlock( const ResidualBlockDesc* desc, int batchSize, @@ -1139,50 +1334,16 @@ bool NeuralNet::testEvaluateResidualBlock( const vector& maskBuffer, vector& outputBuffer) { + // Metal backend only supports NCHW layout + if(useNHWC) + return false; + + // useFP16 is ignored - MPSGraph tests use FP32 (void)useFP16; - (void)useNHWC; + return MetalProcess::testEvaluateResidualBlock(desc, batchSize, nnXLen, nnYLen, inputBuffer, maskBuffer, outputBuffer); } -bool MetalProcess::testEvaluateGlobalPoolingResidualBlock(const GlobalPoolingResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer) { - - size_t numTrunkFloats = (size_t)batchSize * nnXLen * nnYLen * desc->preBN.numChannels; - outputBuffer.resize(numTrunkFloats); - - testGlobalPoolingResidualBlock(globalPoolingResidualBlockDescToSwift(desc), - batchSize, - nnXLen, - nnYLen, - (float*)inputBuffer.data(), - (float*)maskBuffer.data(), - (float*)outputBuffer.data()); - - return true; -} - -/** - * @brief Evaluate a global pooling residual block using Metal API for testing purposes. - * This function evaluates a global pooling residual block using the Metal API for testing purposes. - * The input buffer and output buffer are specified as vectors of floats, and the result of the computation - * is stored in the output buffer. The function returns true if the evaluation is implemented. - * @param desc A pointer to the GlobalPoolingResidualBlockDesc object describing the global pooling residual block to - * evaluate. - * @param batchSize The batch size to use for computation. - * @param nnXLen The x length of the neural network computation context. - * @param nnYLen The y length of the neural network computation context. - * @param useFP16 A boolean indicating whether to use half-precision floating point format for computation. 
- * @param useNHWC A boolean indicating whether to use NHWC layout for input and output buffers. - * @param inputBuffer A vector of floats containing the input buffer data. - * @param maskBuffer A vector of floats containing the mask buffer data. - * @param outputBuffer A vector of floats to store the computed output. - * @return true if the global pooling residual block evaluation is implemented, false otherwise. - */ bool NeuralNet::testEvaluateGlobalPoolingResidualBlock( const GlobalPoolingResidualBlockDesc* desc, int batchSize, @@ -1194,9 +1355,14 @@ bool NeuralNet::testEvaluateGlobalPoolingResidualBlock( const vector& maskBuffer, vector& outputBuffer) { + // Metal backend only supports NCHW layout + if(useNHWC) + return false; + + // useFP16 is ignored - MPSGraph tests use FP32 (void)useFP16; - (void)useNHWC; + return MetalProcess::testEvaluateGlobalPoolingResidualBlock(desc, batchSize, nnXLen, nnYLen, inputBuffer, maskBuffer, outputBuffer); } -#endif // USE_METAL_BACKEND +#endif // USE_METAL_BACKEND diff --git a/cpp/neuralnet/metalbackend.h b/cpp/neuralnet/metalbackend.h index 34e44b8e7..12cc6b0c0 100644 --- a/cpp/neuralnet/metalbackend.h +++ b/cpp/neuralnet/metalbackend.h @@ -1,4 +1,7 @@ -#pragma once +#ifndef NEURALNET_METALBACKEND_H_ +#define NEURALNET_METALBACKEND_H_ + +#ifdef USE_METAL_BACKEND #include #include "desc.h" @@ -13,51 +16,6 @@ using namespace std; using namespace KataGoSwift; namespace MetalProcess { -SWConvLayerDesc convLayerDescToSwift(const ConvLayerDesc * desc); -SWBatchNormLayerDesc batchNormLayerDescToSwift(const BatchNormLayerDesc * desc); -ActivationKind activationLayerDescToSwift(const ActivationLayerDesc * desc); -SWResidualBlockDesc residualBlockDescToSwift(const ResidualBlockDesc * desc); -SWMatMulLayerDesc matMulLayerDescToSwift(const MatMulLayerDesc * desc); -SWGlobalPoolingResidualBlockDesc globalPoolingResidualBlockDescToSwift(const GlobalPoolingResidualBlockDesc* desc); -swift::Array residualBlocksToSwift(const vector>& 
blocks); -SWNestedBottleneckResidualBlockDesc nestedBottleneckResidualBlockDescToSwift(const NestedBottleneckResidualBlockDesc* desc); -swift::Optional sGFMetadataEncoderDescToSwift(const SGFMetadataEncoderDesc * desc); -SWTrunkDesc trunkDescToSwift(const TrunkDesc * trunk); -SWPolicyHeadDesc policyHeadDescToSwift(const PolicyHeadDesc * policyHead); -SWMatBiasLayerDesc matBiasLayerDescToSwift(const MatBiasLayerDesc * desc); -SWValueHeadDesc valueHeadDescToSwift(const ValueHeadDesc * valueHead); -SWModelDesc modelDescToSwift(const ModelDesc* modelDesc); - -bool testEvaluateConv(const ConvLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - vector& outputBuffer); - -bool testEvaluateBatchNorm(const BatchNormLayerDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer); - -bool testEvaluateResidualBlock(const ResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer); - -bool testEvaluateGlobalPoolingResidualBlock(const GlobalPoolingResidualBlockDesc* desc, - int batchSize, - int nnXLen, - int nnYLen, - const vector& inputBuffer, - const vector& maskBuffer, - vector& outputBuffer); void copyRowData(float* dest, const float* src, size_t numElements); void convertNCHW(float* rowSpatialInput, int C, int H, int W, bool inputsUseNHWC); @@ -89,63 +47,44 @@ void processRow(size_t row, vector& outputs); void getMetalOutput(ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - int numBatchEltsFilled, - NNResultBuf** inputBufs, - vector& outputs); -}; + InputBuffers* inputBuffers, + int numBatchEltsFilled, + NNResultBuf** inputBufs, + vector& outputs); +} /** * @brief Represents a loaded neural network model. * A LoadedModel object contains a ModelDesc object that describes the characteristics of the loaded model. 
- * The default constructor, copy constructor, and assignment operator are deleted to prevent - * creation of an uninitialized LoadedModel object, copying of the loaded model, and potential memory leaks. + * For Metal backend, we also store the model path for on-demand conversion. */ struct LoadedModel { /** * @brief The description of the loaded model. - * The modelDesc field is a ModelDesc object that describes the characteristics of the loaded model. */ ModelDesc modelDesc; + /** + * @brief Path to the original .bin.gz model file for conversion. + */ + string modelPath; + /** * @brief Construct a new Loaded Model object - * This constructor loads a machine learning model from a file and sets the modelDesc field to the - * characteristics of the loaded model. + * This constructor loads a machine learning model from a file and sets the modelDesc field. * @param fileName The name of the file containing the machine learning model. * @param expectedSha256 The expected SHA-256 hash of the model file. */ - LoadedModel(const string& fileName, const string& expectedSha256) - { - ModelDesc::loadFromFileMaybeGZipped(fileName, modelDesc, expectedSha256); - } + LoadedModel(const string& fileName, const string& expectedSha256); - /** - * @brief Delete the default constructor - * The default constructor is deleted to prevent creation of an uninitialized LoadedModel object. - */ LoadedModel() = delete; - - /** - * @brief Delete the copy constructor - * The copy constructor is deleted to prevent copying of the loaded model. - */ LoadedModel(const LoadedModel&) = delete; - - /** - * @brief Delete the assignment operator - * The assignment operator is deleted to prevent copying of the loaded model. - */ LoadedModel& operator=(const LoadedModel&) = delete; }; /** - * @brief Context for computing neural network operations. 
- * A ComputeContext object contains configuration settings for neural network computations, such as - * whether to use half-precision floating-point (FP16) mode and whether to use the NHWC format for - * input tensors. The default constructor, copy constructor, and assignment operator are deleted - * to prevent creation of an uninitialized ComputeContext object, copying of the object, and potential - * memory leaks. + * @brief Context for computing neural network operations using Metal. + * Contains global configuration settings for neural network computations. */ struct ComputeContext { /** @@ -154,64 +93,47 @@ struct ComputeContext { enabled_t useFP16Mode; /** - * @brief ComputeContext ID + * @brief The width of the neural network input. */ - int identifier; + int nnXLen; /** - * @brief Metal compute context instance + * @brief The height of the neural network input. */ - MetalComputeContext metalComputeContext; + int nnYLen; + + /** + * @brief Metal compute context instance from Swift. + */ + MetalComputeContext metalContext; /** * @brief Constructs a ComputeContext object. - * This constructor creates a ComputeContext object and sets the configuration settings for neural network - * computations, including whether to use FP16 mode and whether to use the NHWC format for input tensors. * @param nnX The width of the input tensor. * @param nnY The height of the input tensor. - * @param useFP16Mode Whether to use half-precision floating-point (FP16) mode for computations. + * @param useFP16Mode Whether to use half-precision floating-point (FP16) mode. * @param useNHWCMode Whether to use the NHWC format for input tensors. */ ComputeContext(int nnX, int nnY, enabled_t useFP16Mode, enabled_t useNHWCMode); - /** - * @brief Destroys the ComputeContext object. - */ ~ComputeContext(); - - /** - * @brief Deletes the default constructor. - */ ComputeContext() = delete; - - /** - * @brief Deletes the copy constructor. 
- */ ComputeContext(const ComputeContext&) = delete; - - /** - * @brief Deletes the copy constructor. - * - * @return ComputeContext& - */ ComputeContext& operator=(const ComputeContext&) = delete; }; /** - * @brief A handle for performing neural network computations. - * This struct represents a handle for computing neural network operations. It contains various - * parameters and settings that determine how the computation is performed. + * @brief A handle for performing neural network computations using Metal. + * This struct represents a per-thread handle for computing neural network operations. */ struct ComputeHandle { - int identifier; - /** - * @brief The x length of the neural network computation context. + * @brief The x length of the neural network. */ int nnXLen; /** - * @brief The y length of the neural network computation context. + * @brief The y length of the neural network. */ int nnYLen; @@ -236,53 +158,55 @@ struct ComputeHandle { bool inputsUseNHWC; /** - * @brief Whether to use 16-bit floating-point precision for computation. + * @brief Whether to use 16-bit floating-point precision. */ bool useFP16; /** - * @brief The Metal handle instance. + * @brief Whether exact neural net length is required (enables mask optimization). + */ + bool requireExactNNLen; + + /** + * @brief The hybrid compute handle instance from Swift. + * This handle dispatches work to both CoreML (CPU+ANE) and MPSGraph (GPU). */ - swift::Optional metalhandle; + swift::Optional hybridHandle; + + /** + * @brief The MPSGraph-only handle instance from Swift (used for FP32 mode). + * This handle dispatches work only to GPU, avoiding slow FP32 CPU+ANE execution. + */ + swift::Optional mpsGraphOnlyHandle; /** * @brief Construct a new ComputeHandle object. - * This constructor initializes a new ComputeHandle object with the specified parameters and settings. * @param context The ComputeContext object to use for computation. 
- * @param loadedModel A pointer to the LoadedModel object containing the neural network model to use. + * @param loadedModel A pointer to the LoadedModel object. * @param inputsUseNHWC Whether the input data uses NHWC format. - * @param gpuIdx The index of the GPU to use for computation. - * @param serverThreadIdx The index of the server thread to use for computation. + * @param gpuIdx The index of the GPU to use. + * @param serverThreadIdx The index of the server thread. + * @param requireExactNNLen Whether exact NN length is required. + * @param maxBatchSize Maximum batch size for dynamic batch support. */ ComputeHandle( ComputeContext* context, const LoadedModel* loadedModel, bool inputsUseNHWC, int gpuIdx, - int serverThreadIdx); + int serverThreadIdx, + bool requireExactNNLen, + int maxBatchSize); - /** - * @brief Destroy the ComputeHandle object. - * This destructor frees any resources that were allocated for the ComputeHandle object. - */ ~ComputeHandle(); - - /** - * @brief Delete the default constructor. - */ ComputeHandle() = delete; - - /** - * @brief Delete the copy constructor. - */ ComputeHandle(const ComputeHandle&) = delete; - - /** - * @brief Delete the assignment operator. - */ ComputeHandle& operator=(const ComputeHandle&) = delete; }; +/** + * @brief Input and output buffers for neural network inference. 
+ */ struct InputBuffers { int maxBatchSize; size_t policyResultChannels; @@ -298,6 +222,7 @@ struct InputBuffers { size_t singleOwnershipResultElts; size_t singleOwnerMapElts; size_t singleScoreValuesResultElts; + size_t singleMaskElts; size_t rowSpatialBufferElts; size_t userInputBufferElts; @@ -310,6 +235,7 @@ struct InputBuffers { size_t ownershipResultBufferElts; size_t ownerMapBufferElts; size_t scoreValuesResultBufferElts; + size_t userInputMaskBufferElts; float* rowSpatialBuffer; float* userInputBuffer; @@ -322,6 +248,7 @@ struct InputBuffers { float* ownershipResults; float* ownerMapBuffer; float* scoreValuesResults; + float* userInputMaskBuffer; InputBuffers(const LoadedModel* loadedModel, int maxBatchSz, int nnXLen, int nnYLen); ~InputBuffers(); @@ -329,3 +256,7 @@ struct InputBuffers { InputBuffers(const InputBuffers&) = delete; InputBuffers& operator=(const InputBuffers&) = delete; }; + +#endif // USE_METAL_BACKEND + +#endif // NEURALNET_METALBACKEND_H_ diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index 97c6e181d..8f209d054 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -1,3032 +1,681 @@ import Foundation +import CoreML import MetalPerformanceShaders import MetalPerformanceShadersGraph /// A class that handles output to standard error. class StandardError: TextOutputStream { - /// Outputs the specified string to the standard error stream. func write(_ string: String) { - /// Tries to write the UTF-8 encoded contents of the string to the standard error file handle. try? 
FileHandle.standardError.write(contentsOf: Data(string.utf8)) } } -/// A function to print error messages +/// Print to standard error func printError(_ item: Any) { - // Create an instance of StandardError to direct output to the standard error stream var instance = StandardError() - // Output the provided item to the standard error using the created instance print(item, to: &instance) } -/// An extension to the Data struct for handling float data with optional FP16 conversion. -extension Data { - /// Initializes a new Data instance using an UnsafeMutablePointer, with optional conversion to FP16 format. - /// - Parameters: - /// - floatsNoCopy: An UnsafeMutablePointer containing the float data. - /// - shape: An array of NSNumber objects representing the shape of the data. - init( - floatsNoCopy: UnsafeMutablePointer, - shape: [NSNumber] - ) { - self.init( - bytesNoCopy: floatsNoCopy, - count: shape.countBytesOfFloat32(), - deallocator: .none) - } -} - -/// Extension to MPSNDArray to convert from MPSGraphTensor, and to read/write bytes from/to UnsafeMutableRawPointer -extension MPSNDArray { - /// Read bytes from the buffer - /// - Parameter buffer: The buffer to read - func readBytes(_ buffer: UnsafeMutableRawPointer) { - self.readBytes(buffer, strideBytes: nil) - } - - /// Write bytes to the buffer - /// - Parameter buffer: The buffer to write - func writeBytes(_ buffer: UnsafeMutableRawPointer) { - self.writeBytes(buffer, strideBytes: nil) - } -} - -/// Extension to Array to count number of elements and bytes -extension Array where Element == NSNumber { - /// Count number of elements - /// - Returns: Number of elements - func countElements() -> Int { - return reduce(1, { $0 * $1.intValue }) - } - - /// Count number of bytes - /// - Parameter dataType: The data type - /// - Returns: Number of bytes - func countBytesOfFloat32() -> Int { - return countElements() * MemoryLayout.size - } -} - -/// Extension to MPSGraph to the mish activation function -extension 
MPSGraph { - /// This function applies the Mish activation function on the input tensor `x`. The Mish function is defined as - /// x * tanh(Softplus(x)), where Softplus(x) is defined as log(1 + exp(min(x, 10.39))) if x < 10.39 and x otherwise. - /// When FP16 is later used, the threshold of softplus will need to be modified to 10.39, which is different from - /// the original 20. This is because exp(10.39) = 32532.666936 < 32767.0 < 65504.0, so the result of exp(10.39) can - /// be represented by float16. If the threshold of softplus is 20, the result of exp(20) is 485165195.40979004, - /// which is out of range of float16. - /// - Parameter tensor: The input tensor of mish activation function - /// - Returns: The output tensor of mish activation function - func mish(tensor: MPSGraphTensor) -> MPSGraphTensor { - assert(tensor.dataType == .float32) - - let one = 1.0 - let threshold = 20.0 - let thresholdTensor = constant(threshold, dataType: tensor.dataType) - let minimumTensor = minimum(tensor, thresholdTensor, name: nil) - let expTensor = exponent(with: minimumTensor, name: nil) - let oneTensor = constant(one, dataType: tensor.dataType) - let addTensor = addition(expTensor, oneTensor, name: nil) - let logTensor = logarithm(with: addTensor, name: nil) - let lessTensor = lessThan(tensor, thresholdTensor, name: nil) - let selectTensor = select( - predicate: lessTensor, trueTensor: logTensor, falseTensor: tensor, name: nil) - let tanhTensor = tanh(with: selectTensor, name: nil) - let mulTensor = multiplication(tensor, tanhTensor, name: nil) - - return mulTensor - } -} - -/// A structure that represents the input shape -struct InputShape { - /// Create a shape for the input tensor - /// - Parameters: - /// - batchSize: Batch size - /// - numChannels: Number of channels - /// - nnYLen: Y length - /// - nnXLen: X length - /// - Returns: The shape - static func create( - batchSize: NSNumber, - numChannels: NSNumber, - nnYLen: NSNumber, - nnXLen: NSNumber - ) -> [NSNumber] 
{ - let shape = [ - batchSize, - numChannels, - nnYLen, - nnXLen, - ] - return shape - } - - /// Get the channel axis - /// - Returns: The channel axis - static func getChannelAxis() -> Int { - return 1 - } - - /// Get the HW axes - /// - Returns: The HW axes - static func getHWAxes() -> [NSNumber] { - let hwAxes = [2, 3] as [NSNumber] - return hwAxes - } -} - -/// A structure that represents the input layer -struct InputLayer { - let tensor: MPSGraphTensor - let shape: [NSNumber] - - /// Initialize a InputLayer object - /// - Parameters: - /// - graph: The graph - /// - nnXLen: X length - /// - nnYLen: Y length - /// - numChannels: Number of channels - /// - dataType: Data type - init( - graph: MPSGraph, - nnXLen: NSNumber, - nnYLen: NSNumber, - numChannels: NSNumber, - dataType: MPSDataType = .float32 - ) { - shape = InputShape.create( - batchSize: -1, - numChannels: numChannels, - nnYLen: nnYLen, - nnXLen: nnXLen) - - self.tensor = graph.placeholder( - shape: shape, - dataType: dataType, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A structure that represents an input global layer for a neural network model. -struct InputGlobalLayer { - let tensor: MPSGraphTensor - let shape: [NSNumber] - - /// Initializes an InputGlobalLayer object with a graph, batch size, number of global features, data type, and input shape. - /// - Parameters: - /// - graph: The graph. - /// - numGlobalFeatures: The number of global features. - /// - dataType: The data type. - init( - graph: MPSGraph, - numGlobalFeatures: NSNumber, - dataType: MPSDataType = .float32 - ) { - shape = InputShape.create( - batchSize: -1, - numChannels: numGlobalFeatures, - nnYLen: 1, - nnXLen: 1) - - self.tensor = graph.placeholder( - shape: shape, - dataType: dataType, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A structure representing the input meta layer for a neural network graph. 
-struct InputMetaLayer { - /// A `MPSGraphTensor` representing the placeholder tensor in the graph. - let tensor: MPSGraphTensor - /// An array of `NSNumber` representing the shape of the tensor placeholder. - let shape: [NSNumber] - - /// Initializes a new `InputMetaLayer` instance with the given graph and number of meta features. - /// - /// - Parameters: - /// - graph: The `MPSGraph` instance where the placeholder tensor will be created. - /// - numMetaFeatures: The number of meta features (channels) for the input tensor. - /// - dataType: The data type - /// - /// This initializer sets the shape of the input tensor using a helper function `InputShape.create` with - /// a dynamic batch size (-1), the specified number of channels, and a spatial size of 1x1 (nnYLen and nnXLen). - /// It also creates a placeholder tensor in the MPS graph with the specified shape and data type `float32`. - init( - graph: MPSGraph, - numMetaFeatures: NSNumber, - dataType: MPSDataType = .float32 - ) { - // Define the shape of the input tensor with dynamic batch size, specified number of channels, and spatial dimensions 1x1. - shape = InputShape.create( - batchSize: -1, - numChannels: numMetaFeatures, - nnYLen: 1, - nnXLen: 1) - - // Create a placeholder tensor in the graph with the above-defined shape and data type float32. - self.tensor = graph.placeholder( - shape: shape, - dataType: dataType, - name: nil) - } -} - -/// A structure that represents a mask layer for a neural network model. -struct MaskLayer { - let tensor: MPSGraphTensor - let shape: [NSNumber] - - /// Initializes a MaskLayer object with a graph, batch size, x and y lengths, data type, and input shape. - /// - Parameters: - /// - graph: The graph. - /// - nnXLen: The length of the x-axis. - /// - nnYLen: The length of the y-axis. - /// - dataType: The data type. 
- init( - graph: MPSGraph, - nnXLen: NSNumber, - nnYLen: NSNumber, - dataType: MPSDataType = .float32 - ) { - shape = InputShape.create( - batchSize: -1, - numChannels: 1, - nnYLen: nnYLen, - nnXLen: nnXLen) - - self.tensor = graph.placeholder( - shape: shape, - dataType: dataType, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A structure that represents a layer which performs the summation operation on a mask layer. -struct MaskSumLayer { - let tensor: MPSGraphTensor - - /// Initializes a MaskSumLayer object with a given tensor. - /// - Parameter tensor: The tensor to use for the layer. - init(tensor: MPSGraphTensor) { - self.tensor = tensor - assert(self.tensor.shape?.count == 4) - } - - /// Initializes a MaskSumLayer object with a graph, a mask layer, and a boolean flag indicating whether to use NHWC or NCHW format. - /// - Parameters: - /// - graph: The graph. - /// - maskTensor: The mask tensor. - init( - graph: MPSGraph, - maskTensor: MPSGraphTensor - ) { - let hwAxes = InputShape.getHWAxes() - - self.tensor = graph.reductionSum( - with: maskTensor, - axes: hwAxes, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A structure that represents a layer which performs square root, subtraction, and multiplication operations on a MaskSumLayer object. -struct MaskSumSqrtS14M01Layer { - let tensor: MPSGraphTensor - - /// Initializes a MaskSumSqrtS14M01Layer object with a given tensor. - /// - Parameter tensor: The tensor to use for the layer. - init(tensor: MPSGraphTensor) { - self.tensor = tensor - assert(self.tensor.shape?.count == 4) - } - - /// Initializes a MaskSumSqrtS14M01Layer object with a graph, a MaskSumLayer object, and a boolean flag indicating whether to use 16-bit floating-point data type. - /// - Parameters: - /// - graph: The graph. - /// - maskSum: The MaskSumLayer object. 
- init( - graph: MPSGraph, - maskSum: MaskSumLayer - ) { - let sqrtMaskSum = graph.squareRoot(with: maskSum.tensor, name: nil) - - let fourTeen = graph.constant( - 14.0, - shape: [1], - dataType: maskSum.tensor.dataType) - - let subtracted = graph.subtraction(sqrtMaskSum, fourTeen, name: nil) - - let zeroPointone = graph.constant( - 0.1, - shape: [1], - dataType: maskSum.tensor.dataType) - - self.tensor = graph.multiplication( - subtracted, - zeroPointone, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A structure that represents a layer which performs squaring and subtraction operations on a MaskSumSqrtS14M01Layer object. -struct MaskSumSqrtS14M01SquareS01Layer { - let tensor: MPSGraphTensor - - /// Initializes a MaskSumSqrtS14M01SquareS01Layer object with a given tensor. - /// - Parameter tensor: The tensor to use for the layer. - init(tensor: MPSGraphTensor) { - self.tensor = tensor - assert(self.tensor.shape?.count == 4) - } - - /// Initializes a MaskSumSqrtS14M01SquareS01Layer object with a graph, a MaskSumSqrtS14M01Layer object, and a boolean flag indicating whether to use 16-bit floating-point data type. - /// - Parameters: - /// - graph: The graph. - /// - maskSumSqrtS14M01: The MaskSumSqrtS14M01Layer object. - init( - graph: MPSGraph, - maskSumSqrtS14M01: MaskSumSqrtS14M01Layer - ) { - let squared = graph.square(with: maskSumSqrtS14M01.tensor, name: nil) - - let zeroPointone = graph.constant( - 0.1, - shape: [1], - dataType: maskSumSqrtS14M01.tensor.dataType) - - self.tensor = graph.subtraction( - squared, - zeroPointone, - name: nil) - - assert(self.tensor.shape?.count == 4) - } -} - -/// A Swift structure that represents a network tester, which tests various neural network configurations. -struct NetworkTester { - - /// A static function that tests a custom neural network configuration with the given parameters. - /// - Parameters: - /// - batchSize: The number of input batches. - /// - nnXLen: The width of the input tensor. 
- /// - nnYLen: The height of the input tensor. - /// - numChannels: The number of channels in the input tensor. - /// - input: A pointer to the input data. - /// - mask: A pointer to the mask data. - /// - output: A pointer to the output data. - /// - networkBuilder: A closure that takes an MPSGraph, InputLayer, and MaskLayer, and returns an MPSGraphTensor representing the custom network configuration. - static func test( - batchSize: NSNumber, - nnXLen: NSNumber, - nnYLen: NSNumber, - numChannels: NSNumber, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer, - networkBuilder: (MPSGraph, InputLayer, MaskLayer) -> MPSGraphTensor - ) { - - // Create a Metal device. - let device = MTLCreateSystemDefaultDevice()! - - // Create a MPSGraph. - let graph = MPSGraph() - - // Create the input and mask layers. - let inputLayer = InputLayer( - graph: graph, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: numChannels) - - let maskLayer = MaskLayer( - graph: graph, - nnXLen: nnXLen, - nnYLen: nnYLen) - - // Build the custom network configuration using the provided networkBuilder closure. - let resultTensor = networkBuilder(graph, inputLayer, maskLayer) - - // Create input shape - let inputShape = InputShape.create( - batchSize: batchSize, - numChannels: numChannels, - nnYLen: nnYLen, - nnXLen: nnXLen) - - // Create MPSNDArrayDescriptors from the input shape. - let sourceDescriptor = MPSNDArrayDescriptor( - dataType: inputLayer.tensor.dataType, - shape: inputShape) - - // Create MPSNDArray from the source descriptor. - let sourceArray = MPSNDArray( - device: device, - descriptor: sourceDescriptor) - - // Create a mask shape - let maskShape = InputShape.create( - batchSize: batchSize, - numChannels: 1, - nnYLen: nnYLen, - nnXLen: nnXLen) - - // Create MPSNDArrayDescriptors from the mask shape. 
- let maskDescriptor = MPSNDArrayDescriptor( - dataType: maskLayer.tensor.dataType, - shape: maskShape) - - // Create MPSNDArray from the mask descriptor. - let maskArray = MPSNDArray( - device: device, - descriptor: maskDescriptor) - - // Write input and mask data to their respective MPSNDArrays, converting to FP16 if necessary. - sourceArray.writeBytes(input) - maskArray.writeBytes(mask) - - // Create MPSGraphTensorData objects from the source and mask arrays. - let sourceTensorData = MPSGraphTensorData(sourceArray) - let maskTensorData = MPSGraphTensorData(maskArray) - - // Execute the graph and fetch the result. - let fetch = graph.run( - feeds: [ - inputLayer.tensor: sourceTensorData, - maskLayer.tensor: maskTensorData, - ], - targetTensors: [resultTensor], - targetOperations: nil) - - // Read the output data from the result tensor, converting from FP16 to FP32 if necessary. - fetch[resultTensor]?.mpsndarray().readBytes(output) - } -} - -/// A struct that represents a description of convolutional layer. -public struct SWConvLayerDesc { - let convYSize: NSNumber - let convXSize: NSNumber - let inChannels: NSNumber - let outChannels: NSNumber - let dilationY: Int - let dilationX: Int - let weights: UnsafeMutablePointer - - /// Initializes a SWConvLayerDesc object. - /// - Parameters: - /// - convYSize: The Y size of the convolution. - /// - convXSize: The X size of the convolution. - /// - inChannels: The number of input channels. - /// - outChannels: The number of output channels. - /// - dilationY: The dilation in the Y direction. - /// - dilationX: The dilation in the X direction. - /// - weights: A pointer to the weights. 
- init( - convYSize: NSNumber, - convXSize: NSNumber, - inChannels: NSNumber, - outChannels: NSNumber, - dilationY: Int, - dilationX: Int, - weights: UnsafeMutablePointer - ) { - self.convYSize = convYSize - self.convXSize = convXSize - self.inChannels = inChannels - self.outChannels = outChannels - self.dilationY = dilationY - self.dilationX = dilationX - self.weights = weights - } -} - -public func createSWConvLayerDesc( - convYSize: Int32, - convXSize: Int32, - inChannels: Int32, - outChannels: Int32, - dilationY: Int32, - dilationX: Int32, - weights: UnsafeMutablePointer -) -> SWConvLayerDesc { - return SWConvLayerDesc( - convYSize: convYSize as NSNumber, - convXSize: convXSize as NSNumber, - inChannels: inChannels as NSNumber, - outChannels: outChannels as NSNumber, - dilationY: Int(dilationY), - dilationX: Int(dilationX), - weights: weights) -} - -/// A class that represents a convolutional layer using MPSGraph -class ConvLayer { - /// The result tensor of the convolutional operation - let resultTensor: MPSGraphTensor - /// The convolution 2D operation descriptor - let convDescriptor = MPSGraphConvolution2DOpDescriptor( - strideInX: 1, - strideInY: 1, - dilationRateInX: 1, - dilationRateInY: 1, - groups: 1, - paddingStyle: .TF_SAME, - dataLayout: .NCHW, - weightsLayout: .OIHW)! - - /// Class method that tests the convolutional layer by running a forward pass - /// - Parameters: - /// - descriptor: A descriptor for the convolutional layer - /// - nnXLen: The width of the input tensor - /// - nnYLen: The height of the input tensor - /// - batchSize: The batch size of the input tensor - /// - input: A pointer to the input tensor data - /// - output: A pointer to the output tensor data - class func test( - descriptor: SWConvLayerDesc, - nnXLen: NSNumber, - nnYLen: NSNumber, - batchSize: NSNumber, - input: UnsafeMutablePointer, - output: UnsafeMutablePointer - ) { - let device = MTLCreateSystemDefaultDevice()! 
- let graph = MPSGraph() - - let source = InputLayer( - graph: graph, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.inChannels) - - let conv = ConvLayer( - graph: graph, - sourceTensor: source.tensor, - descriptor: descriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let inputShape = InputShape.create( - batchSize: batchSize, - numChannels: descriptor.inChannels, - nnYLen: nnYLen, - nnXLen: nnXLen) - - let sourceDescriptor = MPSNDArrayDescriptor( - dataType: source.tensor.dataType, - shape: inputShape) - - let sourceArray = MPSNDArray( - device: device, - descriptor: sourceDescriptor) - - sourceArray.writeBytes(input) - let sourceTensorData = MPSGraphTensorData(sourceArray) - - let fetch = graph.run( - feeds: [source.tensor: sourceTensorData], - targetTensors: [conv.resultTensor], - targetOperations: nil) - - fetch[conv.resultTensor]?.mpsndarray().readBytes(output) - } - - /// Initializes a ConvLayer object - /// - Parameters: - /// - graph: An MPSGraph object - /// - sourceTensor: The input tensor for the convolutional layer - /// - descriptor: A descriptor for the convolutional layer - /// - nnXLen: The width of the input tensor - /// - nnYLen: The height of the input tensor - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - descriptor: SWConvLayerDesc, - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - let weightsShape = [ - descriptor.outChannels, - descriptor.inChannels, - descriptor.convYSize, - descriptor.convXSize, - ] - - let weightsData = Data( - floatsNoCopy: descriptor.weights, - shape: weightsShape) - - let weightsTensor = graph.constant( - weightsData, - shape: weightsShape, - dataType: sourceTensor.dataType) - - resultTensor = graph.convolution2D( - sourceTensor, - weights: weightsTensor, - descriptor: convDescriptor, - name: nil) - - assert(resultTensor.shape?.count == 4) - } -} - -public func testConvLayer( - descriptor: SWConvLayerDesc, - nnXLen: Int32, - nnYLen: Int32, - batchSize: Int32, - input: UnsafeMutablePointer, - 
output: UnsafeMutablePointer -) { - ConvLayer.test( - descriptor: descriptor, - nnXLen: nnXLen as NSNumber, - nnYLen: nnYLen as NSNumber, - batchSize: batchSize as NSNumber, - input: input, - output: output) -} - -/// A struct that represents a description of a batch normalization layer. -public struct SWBatchNormLayerDesc { - let numChannels: NSNumber - let mergedScale: UnsafeMutablePointer - let mergedBias: UnsafeMutablePointer - - /// Initializes a SWBatchNormLayerDesc object. - /// - Parameters: - /// - numChannels: The number of channels in the input tensor. - /// - mergedScale: A pointer to the merged scale. - /// - mergedBias: A pointer to the merged bias. - init( - numChannels: NSNumber, - mergedScale: UnsafeMutablePointer, - mergedBias: UnsafeMutablePointer - ) { - self.numChannels = numChannels - self.mergedScale = mergedScale - self.mergedBias = mergedBias - } -} - -public func createSWBatchNormLayerDesc( - numChannels: Int32, - mergedScale: UnsafeMutablePointer, - mergedBias: UnsafeMutablePointer -) -> SWBatchNormLayerDesc { - return SWBatchNormLayerDesc( - numChannels: numChannels as NSNumber, - mergedScale: mergedScale, - mergedBias: mergedBias) -} - -/// A class that represents a batch normalization layer. -class BatchNormLayer { - let resultTensor: MPSGraphTensor - - /// Executes a test for the batch normalization layer. - /// - Parameters: - /// - descriptor: The description of the batch normalization layer. - /// - nnXLen: The width of the input tensor. - /// - nnYLen: The height of the input tensor. - /// - batchSize: The number of input batches. - /// - input: A pointer to the input data. - /// - mask: A pointer to the mask data. - /// - output: A pointer to the output data. 
- class func test( - descriptor: SWBatchNormLayerDesc, - nnXLen: NSNumber, - nnYLen: NSNumber, - batchSize: NSNumber, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer - ) { - - NetworkTester.test( - batchSize: batchSize, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.numChannels, - input: input, - mask: mask, - output: output - ) { graph, inputLayer, maskLayer in - - let batchNorm = BatchNormLayer( - graph: graph, - sourceTensor: inputLayer.tensor, - maskTensor: maskLayer.tensor, - descriptor: descriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - return batchNorm.resultTensor - } - } - - /// Initializes a BatchNormLayer object with the specified parameters, and computes the normalized and masked result tensor. - /// - Parameters: - /// - graph: The MPSGraph object used to build the BatchNormLayer. - /// - sourceTensor: The input tensor to the BatchNormLayer. - /// - maskTensor: The mask tensor to apply to the normalized tensor. - /// - descriptor: The BatchNormLayer descriptor containing parameters such as the number of channels, mean, variance, scale, and bias. - /// - nnXLen: The length of the input tensor in the X direction. - /// - nnYLen: The length of the input tensor in the Y direction. 
- init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - descriptor: SWBatchNormLayerDesc, - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - let scaleBiasShape = InputShape.create( - batchSize: 1, - numChannels: descriptor.numChannels, - nnYLen: 1, - nnXLen: 1) - - let mergedScaleData = Data( - floatsNoCopy: descriptor.mergedScale, - shape: scaleBiasShape) - - let mergedBiasData = Data( - floatsNoCopy: descriptor.mergedBias, - shape: scaleBiasShape) - - let scaleTensor = graph.constant( - mergedScaleData, - shape: scaleBiasShape, - dataType: sourceTensor.dataType) - - let biasTensor = graph.constant( - mergedBiasData, - shape: scaleBiasShape, - dataType: sourceTensor.dataType) - - let scaled = graph.multiplication( - sourceTensor, - scaleTensor, - name: nil) - - let normalized = graph.addition( - scaled, - biasTensor, - name: nil) - - resultTensor = graph.multiplication( - normalized, - maskTensor, - name: nil) - - assert(resultTensor.shape?.count == 4) - } -} - -public func testBatchNormLayer( - descriptor: SWBatchNormLayerDesc, - nnXLen: Int32, - nnYLen: Int32, - batchSize: Int32, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer -) { - BatchNormLayer.test( - descriptor: descriptor, - nnXLen: nnXLen as NSNumber, - nnYLen: nnYLen as NSNumber, - batchSize: batchSize as NSNumber, - input: input, - mask: mask, - output: output) -} - -/// An enumeration of the different kinds of activation function. 
-public enum ActivationKind { - case identity - case relu - case mish -} - -/// A structure that represents an activation layer -struct ActivationLayer { - let resultTensor: MPSGraphTensor - - /// Initialize an ActivationLayer object - /// - Parameters: - /// - graph: The MPSGraph - /// - sourceTensor: The input tensor - /// - activationKind: The activation kind - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - activationKind: ActivationKind - ) { - - switch activationKind { - case .relu: - resultTensor = graph.reLU(with: sourceTensor, name: nil) - case .mish: - resultTensor = graph.mish(tensor: sourceTensor) - default: - resultTensor = sourceTensor - } - - assert(resultTensor.shape == sourceTensor.shape) - } -} - -/// A class that represents a residual block in a convolutional neural network. -public class SWResidualBlockDesc: BlockDescriptor { - /// A description of the batch normalization layer that is applied before the first convolutional layer. - let preBN: SWBatchNormLayerDesc - - /// The type of activation function that is applied before the first convolutional layer. - let preActivation: ActivationKind - - /// A description of the convolutional layer that is applied in the middle of the residual block. - let regularConv: SWConvLayerDesc - - /// A description of the batch normalization layer that is applied after the middle convolutional layer. - let midBN: SWBatchNormLayerDesc - - /// The type of activation function that is applied after the middle convolutional layer. - let midActivation: ActivationKind - - /// A description of the convolutional layer that is applied at the end of the residual block. - let finalConv: SWConvLayerDesc - - /// Initializes a `SWResidualBlockDesc` object. - /// - Parameters: - /// - preBN: A description of the batch normalization layer that is applied before the first convolutional layer. - /// - preActivation: The type of activation function that is applied before the first convolutional layer. 
- /// - regularConv: A description of the convolutional layer that is applied in the middle of the residual block. - /// - midBN: A description of the batch normalization layer that is applied after the middle convolutional layer. - /// - midActivation: The type of activation function that is applied after the middle convolutional layer. - /// - finalConv: A description of the convolutional layer that is applied at the end of the residual block. - init( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - regularConv: SWConvLayerDesc, - midBN: SWBatchNormLayerDesc, - midActivation: ActivationKind, - finalConv: SWConvLayerDesc - ) { - self.preBN = preBN - self.preActivation = preActivation - self.regularConv = regularConv - self.midBN = midBN - self.midActivation = midActivation - self.finalConv = finalConv - } -} - -public func createSWResidualBlockDesc( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - regularConv: SWConvLayerDesc, - midBN: SWBatchNormLayerDesc, - midActivation: ActivationKind, - finalConv: SWConvLayerDesc -) -> SWResidualBlockDesc { - return SWResidualBlockDesc( - preBN: preBN, - preActivation: preActivation, - regularConv: regularConv, - midBN: midBN, - midActivation: midActivation, - finalConv: finalConv) -} - -/// A class that represents a Residual Block layer -class ResidualBlock { - let resultTensor: MPSGraphTensor - - /// A function that runs tests on the Residual Block layer - /// - /// - Parameters: - /// - descriptor: The Residual Block descriptor - /// - batchSize: Batch size - /// - nnXLen: X length - /// - nnYLen: Y length - /// - input: The input float32 pointer - /// - mask: The mask float32 pointer - /// - output: The output float32 pointer - class func test( - descriptor: SWResidualBlockDesc, - batchSize: NSNumber, - nnXLen: NSNumber, - nnYLen: NSNumber, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer - ) { - - NetworkTester.test( - batchSize: batchSize, - 
nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.preBN.numChannels, - input: input, - mask: mask, - output: output - ) { graph, inputLayer, maskLayer in - - let block = ResidualBlock( - graph: graph, - sourceTensor: inputLayer.tensor, - maskTensor: maskLayer.tensor, - descriptor: descriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - return block.resultTensor - } - } - - /// Initialize a ResidualBlock object - /// - /// - Parameters: - /// - graph: The MPSGraph - /// - sourceTensor: The input tensor - /// - maskTensor: The mask tensor - /// - descriptor: The Residual Block descriptor - /// - nnXLen: X length - /// - nnYLen: Y length - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - descriptor: SWResidualBlockDesc, - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - let preBN = BatchNormLayer( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - descriptor: descriptor.preBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let preActivation = ActivationLayer( - graph: graph, - sourceTensor: preBN.resultTensor, - activationKind: descriptor.preActivation) - - let regularConv = ConvLayer( - graph: graph, - sourceTensor: preActivation.resultTensor, - descriptor: descriptor.regularConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let midBN = BatchNormLayer( - graph: graph, - sourceTensor: regularConv.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.midBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let midActivation = ActivationLayer( - graph: graph, - sourceTensor: midBN.resultTensor, - activationKind: descriptor.midActivation) - - let finalConv = ConvLayer( - graph: graph, - sourceTensor: midActivation.resultTensor, - descriptor: descriptor.finalConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - resultTensor = graph.addition( - sourceTensor, - finalConv.resultTensor, - name: nil) - - assert(resultTensor.shape?.count == 4) - } -} - -public func testResidualBlock( - descriptor: SWResidualBlockDesc, - batchSize: 
Int32, - nnXLen: Int32, - nnYLen: Int32, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer -) { - ResidualBlock.test( - descriptor: descriptor, - batchSize: batchSize as NSNumber, - nnXLen: nnXLen as NSNumber, - nnYLen: nnYLen as NSNumber, - input: input, - mask: mask, - output: output) -} - -/// A structure that represents a global pooling layer -struct GlobalPoolingLayer { - /// The resulting tensor after applying the global pooling operation - let resultTensor: MPSGraphTensor - - /// Initialize a GlobalPoolingLayer object - /// - Parameters: - /// - graph: The graph - /// - sourceTensor: The source tensor to be pooled - /// - maskTensor: The mask tensor - /// - maskSumTensor: The sum of the mask - /// - maskSumSqrtS14M01Tensor: The multiplication of subtraction of square root of the sum of the mask - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor - ) { - let hwAxes = InputShape.getHWAxes() - let channelAxis = InputShape.getChannelAxis() - - let sumTensor = graph.reductionSum( - with: sourceTensor, - axes: hwAxes, - name: nil) - - let meanTensor = graph.division(sumTensor, maskSumTensor, name: nil) - - let meanMaskTensor = graph.multiplication( - meanTensor, - maskSumSqrtS14M01Tensor, - name: nil) - - let oneTensor = graph.constant(1.0, dataType: sourceTensor.dataType) - let maskM1Tensor = graph.subtraction(maskTensor, oneTensor, name: nil) - let addition = graph.addition(sourceTensor, maskM1Tensor, name: nil) - - let maxTensor = graph.reductionMaximum( - with: addition, - axes: hwAxes, - name: nil) - - resultTensor = graph.concatTensors( - [ - meanTensor, - meanMaskTensor, - maxTensor, - ], - dimension: channelAxis, - name: nil) - - assert(resultTensor.shape?.count == 4) - assert(resultTensor.shape?[2] == 1) - assert(resultTensor.shape?[3] == 1) - } -} - -/// A structure that represents a layer that performs 
global pooling on the input tensor -struct GlobalPoolingValueLayer { - let resultTensor: MPSGraphTensor - - /// Initialize a GlobalPoolingValueLayer object - /// - Parameters: - /// - graph: The graph - /// - sourceTensor: The input tensor - /// - maskSumTensor: The sum of the mask - /// - maskSumSqrtS14M01Tensor: The multiplication of subtraction of square root of the sum of the mask - /// - maskSumSqrtS14M01SquareS01Tensor: The subtraction of square of multiplication of subtraction of square root of the sum of the mask - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - maskSumSqrtS14M01SquareS01Tensor: MPSGraphTensor - ) { - let hwAxes = InputShape.getHWAxes() - let channelAxis = InputShape.getChannelAxis() - - let sumTensor = graph.reductionSum( - with: sourceTensor, - axes: hwAxes, - name: nil) - - let meanTensor = graph.division(sumTensor, maskSumTensor, name: nil) - - let meanMaskTensor = graph.multiplication( - meanTensor, - maskSumSqrtS14M01Tensor, - name: nil) - - let meanMaskSquareTensor = graph.multiplication( - meanTensor, - maskSumSqrtS14M01SquareS01Tensor, - name: nil) - - resultTensor = graph.concatTensors( - [ - meanTensor, - meanMaskTensor, - meanMaskSquareTensor, - ], - dimension: channelAxis, - name: nil) - - assert(resultTensor.shape?.count == 4) - assert(resultTensor.shape?[2] == 1) - assert(resultTensor.shape?[3] == 1) - } -} - -/// A struct that represents a matrix multiplication layer descriptor -public struct SWMatMulLayerDesc { - /// The number of input channels - let inChannels: NSNumber - /// The number of output channels - let outChannels: NSNumber - /// The weights used for the matrix multiplication - let weights: UnsafeMutablePointer - - /// Initialize a SWMatMulLayerDesc object - /// - Parameters: - /// - inChannels: The number of input channels - /// - outChannels: The number of output channels - /// - weights: The weights used for the matrix 
multiplication - init( - inChannels: NSNumber, - outChannels: NSNumber, - weights: UnsafeMutablePointer - ) { - self.inChannels = inChannels - self.outChannels = outChannels - self.weights = weights - } -} - -public func createSWMatMulLayerDesc( - inChannels: Int32, - outChannels: Int32, - weights: UnsafeMutablePointer -) -> SWMatMulLayerDesc { - return SWMatMulLayerDesc( - inChannels: inChannels as NSNumber, - outChannels: outChannels as NSNumber, - weights: weights) -} - -/// A structure representing a matrix multiplication layer. -struct MatMulLayer { - /// The resulting tensor from the layer. - let resultTensor: MPSGraphTensor - - /// Initializes a MatMulLayer object. - /// - Parameters: - /// - graph: The graph. - /// - descriptor: The matrix multiplication layer descriptor. - /// - sourceTensor: The input tensor to the layer. - init( - graph: MPSGraph, - descriptor: SWMatMulLayerDesc, - sourceTensor: MPSGraphTensor - ) { - - assert( - (sourceTensor.shape?.count == 4) || (sourceTensor.shape?[1] == descriptor.inChannels)) - assert( - (sourceTensor.shape?.count == 2) || (sourceTensor.shape?[1] == descriptor.inChannels)) - - let weightsShape = [ - descriptor.inChannels, - descriptor.outChannels, - ] - - let weightsData = Data( - floatsNoCopy: descriptor.weights, - shape: weightsShape) - - let weightsTensor = graph.constant( - weightsData, - shape: weightsShape, - dataType: sourceTensor.dataType) - - let shape = [-1, descriptor.inChannels] - - let reshapedSource = graph.reshape( - sourceTensor, - shape: shape, - name: nil) - - resultTensor = graph.matrixMultiplication( - primary: reshapedSource, - secondary: weightsTensor, - name: nil) - - assert(resultTensor.shape?.count == 2) - } -} - -/// An Objective-C class that represents the bias layer description used in Swift. -public struct SWMatBiasLayerDesc { - /// The number of channels. - let numChannels: NSNumber - /// The pointer to the weights. 
- let weights: UnsafeMutablePointer - - /// Initialize an instance of SWMatBiasLayerDesc. - /// - Parameters: - /// - numChannels: The number of channels. - /// - weights: The pointer to the weights. - init( - numChannels: NSNumber, - weights: UnsafeMutablePointer - ) { - self.numChannels = numChannels - self.weights = weights - } -} - -public func createSWMatBiasLayerDesc( - numChannels: Int32, - weights: UnsafeMutablePointer -) -> SWMatBiasLayerDesc { - return SWMatBiasLayerDesc( - numChannels: numChannels as NSNumber, - weights: weights) -} - -/// A structure that performs matrix bias operations -struct MatBiasLayer { - /// The resulting tensor from the layer. - let resultTensor: MPSGraphTensor - - /// Initializes a MatBiasLayer object. - /// - Parameters: - /// - graph: The graph. - /// - descriptor: The descriptor that contains information about the layer - /// - sourceTensor: The input tensor to the layer. - init( - graph: MPSGraph, - descriptor: SWMatBiasLayerDesc, - sourceTensor: MPSGraphTensor - ) { - - assert( - (sourceTensor.shape?.count == 2) && (sourceTensor.shape?[1] == descriptor.numChannels)) - - let weightsShape = [1, descriptor.numChannels] - - let weightsData = Data( - floatsNoCopy: descriptor.weights, - shape: weightsShape) - - let weightsTensor = graph.constant( - weightsData, - shape: weightsShape, - dataType: sourceTensor.dataType) - - resultTensor = graph.addition( - sourceTensor, - weightsTensor, - name: nil) - } -} - -/// A structure that performs bias operations in NC coordinates. -struct AddNCBiasLayer { - /// The resulting tensor from the layer. - let resultTensor: MPSGraphTensor - - /// Initializes an AddNCBiasLayer object. - /// - Parameters: - /// - graph: The graph. - /// - sourceTensor: The input tensor to the layer. - /// - biasTensor: The bias tensor. - /// - nnXLen: The x length. - /// - nnYLen: The y length. - /// - numChannels: The number of channels. 
- init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - biasTensor: MPSGraphTensor, - nnXLen: NSNumber, - nnYLen: NSNumber, - numChannels: NSNumber - ) { - let shape = InputShape.create( - batchSize: -1, - numChannels: numChannels, - nnYLen: 1, - nnXLen: 1) - - assert(biasTensor.shape?[1] == shape[1]) - - let reshaped = graph.reshape(biasTensor, shape: shape, name: nil) - resultTensor = graph.addition(sourceTensor, reshaped, name: nil) - - assert(resultTensor.shape?.count == 4) - assert(resultTensor.shape?[2] == nnYLen) - assert(resultTensor.shape?[3] == nnXLen) - } -} - -/// A class that represents a residual block with global pooling. -public class SWGlobalPoolingResidualBlockDesc: BlockDescriptor { - /// The batch normalization layer before the residual block. - let preBN: SWBatchNormLayerDesc - - /// The pre-activation function of the residual block. - let preActivation: ActivationKind - - /// The regular convolutional layer in the residual block. - let regularConv: SWConvLayerDesc - - /// The convolutional layer for global pooling. - let gpoolConv: SWConvLayerDesc - - /// The batch normalization layer after the global pooling convolutional layer. - let gpoolBN: SWBatchNormLayerDesc - - /// The activation function after the global pooling batch normalization layer. - let gpoolActivation: ActivationKind - - /// The matrix multiplication layer that multiplies the global pooled output with a bias. - let gpoolToBiasMul: SWMatMulLayerDesc - - /// The batch normalization layer after the matrix multiplication layer. - let midBN: SWBatchNormLayerDesc - - /// The activation function after the mid batch normalization layer. - let midActivation: ActivationKind - - /// The final convolutional layer in the residual block. - let finalConv: SWConvLayerDesc - - /// Initialize a SWGlobalPoolingResidualBlockDesc object. - /// - Parameters: - /// - preBN: The batch normalization layer before the residual block. 
- /// - preActivation: The pre-activation function of the residual block. - /// - regularConv: The regular convolutional layer in the residual block. - /// - gpoolConv: The convolutional layer for global pooling. - /// - gpoolBN: The batch normalization layer after the global pooling convolutional layer. - /// - gpoolActivation: The activation function after the global pooling batch normalization layer. - /// - gpoolToBiasMul: The matrix multiplication layer that multiplies the global pooled output with a bias. - /// - midBN: The batch normalization layer after the matrix multiplication layer. - /// - midActivation: The activation function after the mid batch normalization layer. - /// - finalConv: The final convolutional layer in the residual block. - init( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - regularConv: SWConvLayerDesc, - gpoolConv: SWConvLayerDesc, - gpoolBN: SWBatchNormLayerDesc, - gpoolActivation: ActivationKind, - gpoolToBiasMul: SWMatMulLayerDesc, - midBN: SWBatchNormLayerDesc, - midActivation: ActivationKind, - finalConv: SWConvLayerDesc - ) { - self.preBN = preBN - self.preActivation = preActivation - self.regularConv = regularConv - self.gpoolConv = gpoolConv - self.gpoolBN = gpoolBN - self.gpoolActivation = gpoolActivation - self.gpoolToBiasMul = gpoolToBiasMul - self.midBN = midBN - self.midActivation = midActivation - self.finalConv = finalConv - } -} - -public func createSWGlobalPoolingResidualBlockDesc( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - regularConv: SWConvLayerDesc, - gpoolConv: SWConvLayerDesc, - gpoolBN: SWBatchNormLayerDesc, - gpoolActivation: ActivationKind, - gpoolToBiasMul: SWMatMulLayerDesc, - midBN: SWBatchNormLayerDesc, - midActivation: ActivationKind, - finalConv: SWConvLayerDesc -) -> SWGlobalPoolingResidualBlockDesc { - - return SWGlobalPoolingResidualBlockDesc( - preBN: preBN, - preActivation: preActivation, - regularConv: regularConv, - gpoolConv: gpoolConv, - gpoolBN: 
gpoolBN, - gpoolActivation: gpoolActivation, - gpoolToBiasMul: gpoolToBiasMul, - midBN: midBN, - midActivation: midActivation, - finalConv: finalConv) -} - -/// A class representing a residual block with global pooling -class GlobalPoolingResidualBlock { - let resultTensor: MPSGraphTensor - - /// A method to test the global pooling residual block - /// - /// - Parameters: - /// - descriptor: The descriptor of the global pooling residual block - /// - batchSize: The batch size - /// - nnXLen: The X length - /// - nnYLen: The Y length - /// - input: The input pointer - /// - mask: The mask pointer - /// - output: The output pointer - class func test( - descriptor: SWGlobalPoolingResidualBlockDesc, - batchSize: NSNumber, - nnXLen: NSNumber, - nnYLen: NSNumber, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer - ) { - - NetworkTester.test( - batchSize: batchSize, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.preBN.numChannels, - input: input, - mask: mask, - output: output - ) { graph, inputLayer, maskLayer in - - let maskSum = MaskSumLayer( - graph: graph, - maskTensor: maskLayer.tensor) - - let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( - graph: graph, - maskSum: maskSum) - - let block = - GlobalPoolingResidualBlock( - graph: graph, - sourceTensor: inputLayer.tensor, - maskTensor: maskLayer.tensor, - maskSumTensor: maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, - descriptor: descriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - return block.resultTensor - } - } - - /// Initialize a GlobalPoolingResidualBlock object - /// - /// - Parameters: - /// - graph: The graph - /// - sourceTensor: The source tensor - /// - maskTensor: The mask tensor - /// - maskSumTensor: The mask sum tensor - /// - maskSumSqrtS14M01Tensor: The mask sum square tensor - /// - descriptor: The descriptor of the global pooling residual block - /// - nnXLen: The X length - /// - nnYLen: The Y length - init( - graph: 
MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - descriptor: SWGlobalPoolingResidualBlockDesc, - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - let maskSum = MaskSumLayer(tensor: maskSumTensor) - let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer(tensor: maskSumSqrtS14M01Tensor) - - let preBN = BatchNormLayer( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - descriptor: descriptor.preBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let preActivation = ActivationLayer( - graph: graph, - sourceTensor: preBN.resultTensor, - activationKind: descriptor.preActivation) - - let regularConv = ConvLayer( - graph: graph, - sourceTensor: preActivation.resultTensor, - descriptor: descriptor.regularConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let gpoolConv = ConvLayer( - graph: graph, - sourceTensor: preActivation.resultTensor, - descriptor: descriptor.gpoolConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let gpoolBN = BatchNormLayer( - graph: graph, - sourceTensor: gpoolConv.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.gpoolBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let gpoolActivation = ActivationLayer( - graph: graph, - sourceTensor: gpoolBN.resultTensor, - activationKind: descriptor.gpoolActivation) - - let gpoolConcat = GlobalPoolingLayer( - graph: graph, - sourceTensor: gpoolActivation.resultTensor, - maskTensor: maskTensor, - maskSumTensor: maskSum.tensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor) - - assert(gpoolConcat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) - - let gpoolToBiasMul = MatMulLayer( - graph: graph, - descriptor: descriptor.gpoolToBiasMul, - sourceTensor: gpoolConcat.resultTensor) - - let added = AddNCBiasLayer( - graph: graph, - sourceTensor: regularConv.resultTensor, - biasTensor: gpoolToBiasMul.resultTensor, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: 
descriptor.gpoolToBiasMul.outChannels) - - let midBN = BatchNormLayer( - graph: graph, - sourceTensor: added.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.midBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let midActivation = ActivationLayer( - graph: graph, - sourceTensor: midBN.resultTensor, - activationKind: descriptor.midActivation) - - let finalConv = ConvLayer( - graph: graph, - sourceTensor: midActivation.resultTensor, - descriptor: descriptor.finalConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - resultTensor = graph.addition( - sourceTensor, - finalConv.resultTensor, - name: nil) - - assert(resultTensor.shape?.count == 4) - } -} - -public func testGlobalPoolingResidualBlock( - descriptor: SWGlobalPoolingResidualBlockDesc, - batchSize: Int32, - nnXLen: Int32, - nnYLen: Int32, - input: UnsafeMutablePointer, - mask: UnsafeMutablePointer, - output: UnsafeMutablePointer -) { - GlobalPoolingResidualBlock.test( - descriptor: descriptor, - batchSize: batchSize as NSNumber, - nnXLen: nnXLen as NSNumber, - nnYLen: nnYLen as NSNumber, - input: input, - mask: mask, - output: output) -} - -/// A class that represents a nested bottleneck residual block -public class SWNestedBottleneckResidualBlockDesc: BlockDescriptor { - /// The batch normalization layer before the residual block. - let preBN: SWBatchNormLayerDesc - - /// The pre-activation function of the residual block. - let preActivation: ActivationKind - - /// The convolutional layer before the residual block. - let preConv: SWConvLayerDesc - - /// The list of blocks that make up the trunk - let blockDescriptors: [BlockDescriptor] - - /// The batch normalization layer after the residual block. - let postBN: SWBatchNormLayerDesc - - /// The activation function after the post batch normalization layer. - let postActivation: ActivationKind - - /// The convolutional layer after the post activation layer. - let postConv: SWConvLayerDesc - - /// Initialize a SWNestedBottleneckResidualBlockDesc object. 
- /// - Parameters: - /// - preBN: The batch normalization layer before the residual block. - /// - preActivation: The pre-activation function of the residual block. - /// - preConv: The convolutional layer before the residual block. - /// - postBN: The batch normalization layer after the residual block. - /// - postActivation: The activation function after the post batch normalization layer. - /// - postConv: The convolutional layer after the post activation layer. - init( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - preConv: SWConvLayerDesc, - blockDescriptors: [BlockDescriptor], - postBN: SWBatchNormLayerDesc, - postActivation: ActivationKind, - postConv: SWConvLayerDesc - ) { - self.preBN = preBN - self.preActivation = preActivation - self.preConv = preConv - self.blockDescriptors = blockDescriptors - self.postBN = postBN - self.postActivation = postActivation - self.postConv = postConv - } -} - -public func createSWNestedBottleneckResidualBlockDesc( - preBN: SWBatchNormLayerDesc, - preActivation: ActivationKind, - preConv: SWConvLayerDesc, - blockDescriptors: [BlockDescriptor], - postBN: SWBatchNormLayerDesc, - postActivation: ActivationKind, - postConv: SWConvLayerDesc -) -> SWNestedBottleneckResidualBlockDesc { - return SWNestedBottleneckResidualBlockDesc( - preBN: preBN, - preActivation: preActivation, - preConv: preConv, - blockDescriptors: blockDescriptors, - postBN: postBN, - postActivation: postActivation, - postConv: postConv) -} - -public class BlockDescriptor { -} - -public class BlockDescriptorBuilder { - public var blockDescriptors: [BlockDescriptor] = [] - - public func enque(with descriptor: BlockDescriptor) { - blockDescriptors.append(descriptor) - } -} - -public func createBlockDescriptorBuilder() -> BlockDescriptorBuilder { - return BlockDescriptorBuilder() -} - -/// A structure that represents a block stack -struct BlockStack { - /// The resulting tensor after processing the block stack - let resultTensor: MPSGraphTensor 
- - /// Process block descriptors - /// - Parameters: - /// - graph: The MPSGraph - /// - sourceTensor: The input tensor - /// - maskTensor: The mask tensor - /// - maskSumTensor: The sum of the mask tensor - /// - maskSumSqrtS14M01Tensor: The square root of the sum of the mask tensor - /// - blockDescriptors: The block descriptors - /// - index: The index of the block descriptor - /// - nnXLen: X length - /// - nnYLen: Y length - /// - Returns: The result tensor - static func processBlockDescriptors( - _ graph: MPSGraph, - _ sourceTensor: MPSGraphTensor, - _ maskTensor: MPSGraphTensor, - _ maskSumTensor: MPSGraphTensor, - _ maskSumSqrtS14M01Tensor: MPSGraphTensor, - _ blockDescriptors: [BlockDescriptor], - _ index: Int, - _ nnXLen: NSNumber, - _ nnYLen: NSNumber - ) -> MPSGraphTensor { - guard index < blockDescriptors.count else { - return sourceTensor - } - - let blockDescriptor = blockDescriptors[index] - let blockInput: MPSGraphTensor - - switch blockDescriptor { - case let globalPoolingDescriptor as SWGlobalPoolingResidualBlockDesc: - let globalPooling = GlobalPoolingResidualBlock( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, - descriptor: globalPoolingDescriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - blockInput = globalPooling.resultTensor - case let nestedBottleneckDescriptor as SWNestedBottleneckResidualBlockDesc: - let nestedBottleneck = NestedBottleneckResidualBlock( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, - descriptor: nestedBottleneckDescriptor, - nnXLen: nnXLen, - nnYLen: nnYLen) - - blockInput = nestedBottleneck.resultTensor - case let residualBlockDescriptor as SWResidualBlockDesc: - let ordinary = ResidualBlock( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - descriptor: residualBlockDescriptor, - 
nnXLen: nnXLen, - nnYLen: nnYLen) - - blockInput = ordinary.resultTensor - default: - blockInput = sourceTensor - } - - return processBlockDescriptors( - graph, - blockInput, - maskTensor, - maskSumTensor, - maskSumSqrtS14M01Tensor, - blockDescriptors, - index + 1, - nnXLen, - nnYLen) - } - - /// Initialize a BlockStack object - /// - Parameters: - /// - graph: The MPSGraph - /// - sourceTensor: The input tensor - /// - maskTensor: The mask tensor - /// - maskSumTensor: The sum of the mask tensor - /// - maskSumSqrtS14M01Tensor: The square root of the sum of the mask tensor - /// - blockDescriptors: The block descriptors - /// - nnXLen: X length - /// - nnYLen: Y length - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - blockDescriptors: [BlockDescriptor], - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - resultTensor = BlockStack.processBlockDescriptors( - graph, - sourceTensor, - maskTensor, - maskSumTensor, - maskSumSqrtS14M01Tensor, - blockDescriptors, - 0, - nnXLen, - nnYLen) - } -} - -/// A structure that represents a nested bottleneck residual block -struct NestedBottleneckResidualBlock { - /// The resulting tensor after processing the nested bottleneck residual block - let resultTensor: MPSGraphTensor - - /// Initialize a ResidualBlock object - /// - /// - Parameters: - /// - graph: The MPSGraph - /// - sourceTensor: The input tensor - /// - maskTensor: The mask tensor - /// - maskSumTensor: The sum of the mask tensor - /// - maskSumSqrtS14M01Tensor: The square root of the sum of the mask tensor - /// - descriptor: The nested bottleneck residual block descriptor - /// - nnXLen: X length - /// - nnYLen: Y length - init( - graph: MPSGraph, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - descriptor: SWNestedBottleneckResidualBlockDesc, - nnXLen: NSNumber, - 
nnYLen: NSNumber - ) { - - let preBN = BatchNormLayer( - graph: graph, - sourceTensor: sourceTensor, - maskTensor: maskTensor, - descriptor: descriptor.preBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let preActivation = ActivationLayer( - graph: graph, - sourceTensor: preBN.resultTensor, - activationKind: descriptor.preActivation) - - let preConv = ConvLayer( - graph: graph, - sourceTensor: preActivation.resultTensor, - descriptor: descriptor.preConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let blocks = BlockStack( - graph: graph, - sourceTensor: preConv.resultTensor, - maskTensor: maskTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, - blockDescriptors: descriptor.blockDescriptors, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let postBN = BatchNormLayer( - graph: graph, - sourceTensor: blocks.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.postBN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let postActivation = ActivationLayer( - graph: graph, - sourceTensor: postBN.resultTensor, - activationKind: descriptor.postActivation) - - let postConv = ConvLayer( - graph: graph, - sourceTensor: postActivation.resultTensor, - descriptor: descriptor.postConv, - nnXLen: nnXLen, - nnYLen: nnYLen) +// NOTE: Model caching and conversion are now handled in C++ using the native katagocoreml library. +// The Python-based CoreMLConverter and ModelCacheManager have been removed to eliminate Python dependency. - resultTensor = graph.addition( - sourceTensor, - postConv.resultTensor, - name: nil) +/// Context storing board dimensions and settings +public class MetalComputeContext { + public let nnXLen: Int32 + public let nnYLen: Int32 + public let useFP16: Bool - assert(resultTensor.shape?.count == 4) + init(nnXLen: Int32, nnYLen: Int32, useFP16: Bool) { + self.nnXLen = nnXLen + self.nnYLen = nnYLen + self.useFP16 = useFP16 } } -/// Class representing the description of the SGF Metadata Encoder. 
-/// -/// This encoder consists of three matrix multiplication layers, each followed by a bias and an activation function. -public class SWSGFMetadataEncoderDesc { - /// Version of the SGF Metadata Encoder. - let version: Int +/// Create a Metal compute context +public func createMetalComputeContext( + nnXLen: Int32, + nnYLen: Int32, + useFP16: Bool +) -> MetalComputeContext { + return MetalComputeContext(nnXLen: nnXLen, nnYLen: nnYLen, useFP16: useFP16) +} - /// Number of input metadata channels. +/// Handle that wraps the loaded MLModel for inference +public class CoreMLComputeHandle { + let model: MLModel + let nnXLen: Int32 + let nnYLen: Int32 + let optimizeIdentityMask: Bool + let numInputChannels: Int + let numInputGlobalChannels: Int let numInputMetaChannels: Int + let numPolicyChannels: Int + let numValueChannels: Int + let numScoreValueChannels: Int + let numOwnershipChannels: Int + + /// Model input/output names matching KataGoCoremltools output + struct IONames { + static let spatialInput = "spatial_input" + static let globalInput = "global_input" + static let inputMask = "input_mask" + static let metaInput = "meta_input" + + static let policyOutput = "policy_p2_conv" + static let policyPassOutput = "policy_pass" + static let valueOutput = "value_v3_bias" + static let ownershipOutput = "value_ownership_conv" + static let scoreValueOutput = "value_sv3_bias" + } - /// Description of the first multiplication layer. - let mul1: SWMatMulLayerDesc - - /// Description of the bias for the first layer. - let bias1: SWMatBiasLayerDesc - - /// Activation kind for the first layer. - let act1: ActivationKind - - /// Description of the second multiplication layer. - let mul2: SWMatMulLayerDesc - - /// Description of the bias for the second layer. - let bias2: SWMatBiasLayerDesc - - /// Activation kind for the second layer. - let act2: ActivationKind - - /// Description of the third multiplication layer. 
- let mul3: SWMatMulLayerDesc - - /// Initializes a new instance of the `SWSGFMetadataEncoderDesc` class. - /// - /// - Parameters: - /// - version: The version of the SGF Metadata Encoder. - /// - numInputMetaChannels: The number of input metadata channels. - /// - mul1: Description of the first multiplication layer. - /// - bias1: Description of the bias for the first layer. - /// - act1: Activation kind for the first layer. - /// - mul2: Description of the second multiplication layer. - /// - bias2: Description of the bias for the second layer. - /// - act2: Activation kind for the second layer. - /// - mul3: Description of the third multiplication layer. - init( - version: Int, - numInputMetaChannels: Int, - mul1: SWMatMulLayerDesc, - bias1: SWMatBiasLayerDesc, - act1: ActivationKind, - mul2: SWMatMulLayerDesc, - bias2: SWMatBiasLayerDesc, - act2: ActivationKind, - mul3: SWMatMulLayerDesc - ) { - self.version = version + init(model: MLModel, nnXLen: Int32, nnYLen: Int32, + optimizeIdentityMask: Bool, + numInputChannels: Int, + numInputGlobalChannels: Int, + numInputMetaChannels: Int, + numPolicyChannels: Int, + numValueChannels: Int, + numScoreValueChannels: Int, + numOwnershipChannels: Int) { + self.model = model + self.nnXLen = nnXLen + self.nnYLen = nnYLen + self.optimizeIdentityMask = optimizeIdentityMask + self.numInputChannels = numInputChannels + self.numInputGlobalChannels = numInputGlobalChannels self.numInputMetaChannels = numInputMetaChannels - self.mul1 = mul1 - self.bias1 = bias1 - self.act1 = act1 - self.mul2 = mul2 - self.bias2 = bias2 - self.act2 = act2 - self.mul3 = mul3 + self.numPolicyChannels = numPolicyChannels + self.numValueChannels = numValueChannels + self.numScoreValueChannels = numScoreValueChannels + self.numOwnershipChannels = numOwnershipChannels } -} - -/// Creates an instance of `SWSGFMetadataEncoderDesc` using the specified parameters. 
-/// -/// - Parameters: -/// - version: An `Int32` representing the version of the encoder descriptor. -/// - numInputMetaChannels: An `Int32` specifying the number of input metadata channels. -/// - mul1: A `SWMatMulLayerDesc` representing the description of the first matrix multiplication layer. -/// - bias1: A `SWMatBiasLayerDesc` representing the description of the bias for the first layer. -/// - act1: An `ActivationKind` specifying the activation function applied after the first layer. -/// - mul2: A `SWMatMulLayerDesc` representing the description of the second matrix multiplication layer. -/// - bias2: A `SWMatBiasLayerDesc` representing the description of the bias for the second layer. -/// - act2: An `ActivationKind` specifying the activation function applied after the second layer. -/// - mul3: A `SWMatMulLayerDesc` representing the description of the third matrix multiplication layer. -/// -/// - Returns: -/// An instance of `SWSGFMetadataEncoderDesc` initialized with the provided parameters. -public func createSWSGFMetadataEncoderDesc( - version: Int32, - numInputMetaChannels: Int32, - mul1: SWMatMulLayerDesc, - bias1: SWMatBiasLayerDesc, - act1: ActivationKind, - mul2: SWMatMulLayerDesc, - bias2: SWMatBiasLayerDesc, - act2: ActivationKind, - mul3: SWMatMulLayerDesc -) -> SWSGFMetadataEncoderDesc? { - return SWSGFMetadataEncoderDesc( - version: Int(version), - numInputMetaChannels: Int(numInputMetaChannels), - mul1: mul1, - bias1: bias1, - act1: act1, - mul2: mul2, - bias2: bias2, - act2: act2, - mul3: mul3) -} -/// A class that describes SGF metadata encoder. -/// SGFMetadataEncoder takes a graph, a descriptor object defining various parameters for the encoding process, -/// and an input tensor, and performs a sequence of matrix multiplications, bias additions, and activation functions -/// to produce a final encoded tensor. -class SGFMetadataEncoder { - /// The resulting tensor after encoding the metadata. 
- let resultTensor: MPSGraphTensor - - /// Initializes an `SGFMetadataEncoder` instance and performs the encoding process. - /// - /// - Parameters: - /// - graph: The computational graph object used to define and manage tensor operations. - /// - descriptor: An object holding all the required parameters, including matrix multiplication, biases, - /// and activation functions for each layer. - /// - sourceTensor: The initial input tensor containing the metadata to be encoded. - init( - graph: MPSGraph, - descriptor: SWSGFMetadataEncoderDesc, - sourceTensor: MPSGraphTensor + /// Run inference on a batch of inputs + public func apply( + spatialInput: UnsafeMutablePointer, + globalInput: UnsafeMutablePointer, + metaInput: UnsafeMutablePointer, + maskInput: UnsafeMutablePointer, + policy: UnsafeMutablePointer, + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer, + batchSize: Int ) { + // Process batch elements in parallel using Grand Central Dispatch + // Each inference is independent, reading/writing to different buffer offsets + DispatchQueue.concurrentPerform(iterations: batchSize) { b in + autoreleasepool { + do { + try runSingleInference( + batchIndex: b, + spatialInput: spatialInput, + globalInput: globalInput, + metaInput: metaInput, + maskInput: maskInput, + policy: policy, + policyPass: policyPass, + value: value, + scoreValue: scoreValue, + ownership: ownership + ) + } catch { + printError("Metal backend: CoreML inference error: \(error)") + } + } + } + } - // First matrix multiplication layer. - let mul1 = MatMulLayer( - graph: graph, - descriptor: descriptor.mul1, - sourceTensor: sourceTensor) - - // Adding bias to the result of the first matrix multiplication. - let bias1 = MatBiasLayer( - graph: graph, - descriptor: descriptor.bias1, - sourceTensor: mul1.resultTensor) - - // Applying the first activation function to the biased tensor. 
- let act1 = ActivationLayer( - graph: graph, - sourceTensor: bias1.resultTensor, - activationKind: descriptor.act1) - - // Second matrix multiplication layer taking the output of the first activation layer. - let mul2 = MatMulLayer( - graph: graph, - descriptor: descriptor.mul2, - sourceTensor: act1.resultTensor) - - // Adding bias to the result of the second matrix multiplication. - let bias2 = MatBiasLayer( - graph: graph, - descriptor: descriptor.bias2, - sourceTensor: mul2.resultTensor) - - // Applying the second activation function to the biased tensor. - let act2 = ActivationLayer( - graph: graph, - sourceTensor: bias2.resultTensor, - activationKind: descriptor.act2) - - // Third and final matrix multiplication layer taking the output of the second activation layer. - let mul3 = MatMulLayer( - graph: graph, - descriptor: descriptor.mul3, - sourceTensor: act2.resultTensor) + private func runSingleInference( + batchIndex: Int, + spatialInput: UnsafeMutablePointer, + globalInput: UnsafeMutablePointer, + metaInput: UnsafeMutablePointer, + maskInput: UnsafeMutablePointer, + policy: UnsafeMutablePointer, + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer + ) throws { + let spatialSize = Int(nnXLen) * Int(nnYLen) * numInputChannels + let spatialOffset = batchIndex * spatialSize + + // Create MLMultiArray for spatial input (1, C, H, W) + let spatialArray = try MLMultiArray( + shape: [1, NSNumber(value: numInputChannels), + NSNumber(value: nnYLen), NSNumber(value: nnXLen)], + dataType: .float32) + + // Copy spatial data using fast memcpy + let spatialPtr = spatialArray.dataPointer.assumingMemoryBound(to: Float32.self) + memcpy(spatialPtr, spatialInput.advanced(by: spatialOffset), spatialSize * MemoryLayout.size) + + // Create global input array (1, C) - rank 2 as expected by converter + let globalArray = try MLMultiArray( + shape: [1, NSNumber(value: numInputGlobalChannels)], + 
dataType: .float32) + let globalPtr = globalArray.dataPointer.assumingMemoryBound(to: Float32.self) + let globalOffset = batchIndex * numInputGlobalChannels + memcpy(globalPtr, globalInput.advanced(by: globalOffset), numInputGlobalChannels * MemoryLayout.size) + + // Build feature provider dictionary + var inputDict: [String: MLFeatureValue] = [ + IONames.spatialInput: MLFeatureValue(multiArray: spatialArray), + IONames.globalInput: MLFeatureValue(multiArray: globalArray) + ] - // Setting the final result tensor to the output of the last matrix multiplication layer. - resultTensor = mul3.resultTensor + // Add mask input (always required, even with optimize_identity_mask=True) + // When optimize_identity_mask=True, the mask is still required as input but + // internal mask operations are optimized away for ~6.5% speedup + let maskArray = try MLMultiArray( + shape: [1, 1, NSNumber(value: nnYLen), NSNumber(value: nnXLen)], + dataType: .float32) + let maskPtr = maskArray.dataPointer.assumingMemoryBound(to: Float32.self) + let maskSize = Int(nnXLen) * Int(nnYLen) + let maskOffset = batchIndex * maskSize + memcpy(maskPtr, maskInput.advanced(by: maskOffset), maskSize * MemoryLayout.size) + inputDict[IONames.inputMask] = MLFeatureValue(multiArray: maskArray) + + // Add meta input if model has it + if numInputMetaChannels > 0 { + let metaArray = try MLMultiArray( + shape: [1, NSNumber(value: numInputMetaChannels)], + dataType: .float32) + let metaPtr = metaArray.dataPointer.assumingMemoryBound(to: Float32.self) + let metaOffset = batchIndex * numInputMetaChannels + memcpy(metaPtr, metaInput.advanced(by: metaOffset), numInputMetaChannels * MemoryLayout.size) + inputDict[IONames.metaInput] = MLFeatureValue(multiArray: metaArray) + } - assert(resultTensor.shape?.count == 2) + // Run prediction + let featureProvider = try MLDictionaryFeatureProvider(dictionary: inputDict) + let prediction = try model.prediction(from: featureProvider) + + // Extract outputs and copy to output 
buffers + extractOutputs( + prediction: prediction, + batchIndex: batchIndex, + policy: policy, + policyPass: policyPass, + value: value, + scoreValue: scoreValue, + ownership: ownership + ) } -} -/// A class that describes a trunk for a neural network -public class SWTrunkDesc { - /// The version of the ResNet trunk - let version: Int - /// Number of channels for the trunk - let trunkNumChannels: NSNumber - /// Number of channels for the mid section - let midNumChannels: NSNumber - /// Number of channels for the regular section - let regularNumChannels: NSNumber - /// Number of channels for the global pooling section - let gpoolNumChannels: NSNumber - /// The description of the initial convolutional layer - let initialConv: SWConvLayerDesc - /// The description of the initial matrix multiplication layer - let initialMatMul: SWMatMulLayerDesc - /// The description of the SGF metadata encoder - let sgfMetadataEncoder: SWSGFMetadataEncoderDesc? - /// The list of blocks that make up the trunk - let blockDescriptors: [BlockDescriptor] - /// The description of the batch normalization layer that is applied at the end of the trunk - let trunkTipBN: SWBatchNormLayerDesc - /// The activation function that is applied at the end of the trunk - let trunkTipActivation: ActivationKind - - /// Initializes a SWTrunkDesc object - /// - Parameters: - /// - version: The version of the ResNet trunk - /// - trunkNumChannels: Number of channels for the trunk - /// - midNumChannels: Number of channels for the mid section - /// - regularNumChannels: Number of channels for the regular section - /// - gpoolNumChannels: Number of channels for the global pooling section - /// - initialConv: The description of the initial convolutional layer - /// - initialMatMul: The description of the initial matrix multiplication layer - /// - sgfMetadataEncoder: The description of the SGF metadata encoder - /// - blockDescriptors: The list of blocks that make up the trunk - /// - trunkTipBN: The 
description of the batch normalization layer that is applied at the end of the trunk - /// - trunkTipActivation: The activation function that is applied at the end of the trunk - init( - version: Int, - trunkNumChannels: NSNumber, - midNumChannels: NSNumber, - regularNumChannels: NSNumber, - gpoolNumChannels: NSNumber, - initialConv: SWConvLayerDesc, - initialMatMul: SWMatMulLayerDesc, - sgfMetadataEncoder: SWSGFMetadataEncoderDesc?, - blockDescriptors: [BlockDescriptor], - trunkTipBN: SWBatchNormLayerDesc, - trunkTipActivation: ActivationKind + /// Copy MLMultiArray data to destination buffer, respecting strides. + /// Core ML may return non-contiguous arrays, especially for spatial outputs after GPU computation. + private func copyMultiArray( + _ array: MLMultiArray, + to dest: UnsafeMutablePointer, + destOffset: Int ) { - self.version = version - self.trunkNumChannels = trunkNumChannels - self.midNumChannels = midNumChannels - self.regularNumChannels = regularNumChannels - self.gpoolNumChannels = gpoolNumChannels - self.initialConv = initialConv - self.initialMatMul = initialMatMul - self.sgfMetadataEncoder = sgfMetadataEncoder - self.blockDescriptors = blockDescriptors - self.trunkTipBN = trunkTipBN - self.trunkTipActivation = trunkTipActivation - } -} - -public func createSWTrunkDesc( - version: Int32, - trunkNumChannels: Int32, - midNumChannels: Int32, - regularNumChannels: Int32, - gpoolNumChannels: Int32, - initialConv: SWConvLayerDesc, - initialMatMul: SWMatMulLayerDesc, - sgfMetadataEncoder: SWSGFMetadataEncoderDesc?, - blockDescriptors: [BlockDescriptor], - trunkTipBN: SWBatchNormLayerDesc, - trunkTipActivation: ActivationKind -) -> SWTrunkDesc { - return SWTrunkDesc( - version: Int(version), - trunkNumChannels: trunkNumChannels as NSNumber, - midNumChannels: midNumChannels as NSNumber, - regularNumChannels: regularNumChannels as NSNumber, - gpoolNumChannels: gpoolNumChannels as NSNumber, - initialConv: initialConv, - initialMatMul: initialMatMul, - 
sgfMetadataEncoder: sgfMetadataEncoder, - blockDescriptors: blockDescriptors, - trunkTipBN: trunkTipBN, - trunkTipActivation: trunkTipActivation) -} - -/// A structure representing a ResNet trunk for a neural network -struct Trunk { - /// The resulting tensor after processing the trunk - let resultTensor: MPSGraphTensor - - /// Returns the block source tensor by processing the input meta tensor, if available, and adding a bias term. - /// - /// - Parameters: - /// - graph: The Metal Performance Shaders (MPS) graph. - /// - descriptor: The SGF metadata encoder descriptor. - /// - initialAdd: The initial add operation result tensor. - /// - inputMetaTensor: The input meta tensor. - /// - nnXLen: The X length of the neural network (NN). - /// - nnYLen: The Y length of the neural network (NN). - /// - numChannels: The number of channels of the initial add operation result tensor. - /// - /// - Returns: - /// - blockSourceTensor: The processed block source tensor. - /// - /// This function is used to get the block source tensor by processing the input meta tensor, if available. - /// If the input meta tensor is not available, it returns the result tensor from the initial add operation. - /// The function uses SGF metadata encoder and AddNCBiasLayer to process the input meta tensor. 
- static func getBlockSourceTensor( - graph: MPSGraph, - descriptor: SWSGFMetadataEncoderDesc?, - initialAdd: AddNCBiasLayer, - inputMetaTensor: MPSGraphTensor?, - nnXLen: NSNumber, - nnYLen: NSNumber, - numChannels: NSNumber - ) -> MPSGraphTensor { - var blockSourceTensor: MPSGraphTensor - - if let inputMetaTensor, - let descriptor, descriptor.numInputMetaChannels > 0 - { - let encoded = SGFMetadataEncoder( - graph: graph, - descriptor: descriptor, - sourceTensor: inputMetaTensor) + let shape = array.shape.map { $0.intValue } + let strides = array.strides.map { $0.intValue } + let ptr = array.dataPointer.assumingMemoryBound(to: Float32.self) + let totalElements = shape.reduce(1, *) + + // Check if contiguous (strides match expected for row-major C-order) + var isContiguous = true + var expectedStride = 1 + for i in (0...size) + } else { + // Slow path: copy with strides (handles non-contiguous layouts) + copyWithStrides( + from: ptr, + to: dest, + destOffset: destOffset, + shape: shape, + strides: strides, + dim: 0, + srcOffset: 0, + destIdx: 0 + ) + } + } - blockSourceTensor = encodedAdd.resultTensor + /// Recursively copy array elements respecting strides (NCHW order) + @discardableResult + private func copyWithStrides( + from src: UnsafePointer, + to dest: UnsafeMutablePointer, + destOffset: Int, + shape: [Int], + strides: [Int], + dim: Int, + srcOffset: Int, + destIdx: Int + ) -> Int { + var currentDestIdx = destIdx + + if dim == shape.count - 1 { + // Innermost dimension: copy elements + for i in 0.., + policyPass: UnsafeMutablePointer, + value: UnsafeMutablePointer, + scoreValue: UnsafeMutablePointer, + ownership: UnsafeMutablePointer ) { + // Extract policy output (1, policyChannels, H, W) + // Must use stride-aware copy as Core ML may return non-contiguous arrays + if let policyArray = prediction.featureValue(for: IONames.policyOutput)?.multiArrayValue { + let policyOffset = batchIndex * Int(nnXLen) * Int(nnYLen) * numPolicyChannels + 
copyMultiArray(policyArray, to: policy, destOffset: policyOffset) + } - let initialConv = ConvLayer( - graph: graph, - sourceTensor: inputTensor, - descriptor: descriptor.initialConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let initialMatMul = MatMulLayer( - graph: graph, - descriptor: descriptor.initialMatMul, - sourceTensor: inputGlobalTensor) - - let initialAdd = AddNCBiasLayer( - graph: graph, - sourceTensor: initialConv.resultTensor, - biasTensor: initialMatMul.resultTensor, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.initialMatMul.outChannels) - - let blockSourceTensor = Trunk.getBlockSourceTensor( - graph: graph, - descriptor: descriptor.sgfMetadataEncoder, - initialAdd: initialAdd, - inputMetaTensor: inputMetaTensor, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.initialMatMul.outChannels) - - let blocks = BlockStack( - graph: graph, - sourceTensor: blockSourceTensor, - maskTensor: maskTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor, - blockDescriptors: descriptor.blockDescriptors, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let trunkTipBN = BatchNormLayer( - graph: graph, - sourceTensor: blocks.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.trunkTipBN, - nnXLen: nnXLen, - nnYLen: nnYLen) + // Extract policy pass output (1, numPolicyChannels) + if let passArray = prediction.featureValue(for: IONames.policyPassOutput)?.multiArrayValue { + let passOffset = batchIndex * numPolicyChannels + copyMultiArray(passArray, to: policyPass, destOffset: passOffset) + } - let trunkTipActivation = ActivationLayer( - graph: graph, - sourceTensor: trunkTipBN.resultTensor, - activationKind: descriptor.trunkTipActivation) + // Extract value output (1, 3) + if let valueArray = prediction.featureValue(for: IONames.valueOutput)?.multiArrayValue { + let valueOffset = batchIndex * numValueChannels + copyMultiArray(valueArray, to: value, destOffset: valueOffset) + } - resultTensor = 
trunkTipActivation.resultTensor + // Extract score value output (1, numScoreValueChannels) + if let svArray = prediction.featureValue(for: IONames.scoreValueOutput)?.multiArrayValue { + let svOffset = batchIndex * numScoreValueChannels + copyMultiArray(svArray, to: scoreValue, destOffset: svOffset) + } - assert(resultTensor.shape?.count == 4) + // Extract ownership output (1, 1, H, W) + // Must use stride-aware copy as Core ML may return non-contiguous arrays + if let ownArray = prediction.featureValue(for: IONames.ownershipOutput)?.multiArrayValue { + let ownOffset = batchIndex * Int(nnXLen) * Int(nnYLen) * numOwnershipChannels + copyMultiArray(ownArray, to: ownership, destOffset: ownOffset) + } } } -/// A class that describes a policy head for a neural network, responsible for predicting -/// the best moves for the current player and the opposing player on the subsequent turn. -public struct SWPolicyHeadDesc { - /// The version of the policy head - let version: Int - /// The 1x1 convolution layer for P - let p1Conv: SWConvLayerDesc - /// The 1x1 convolution layer for G - let g1Conv: SWConvLayerDesc - /// The batch normalization layer for G - let g1BN: SWBatchNormLayerDesc - /// The activation function for G - let g1Activation: ActivationKind - /// The global pooling bias structure that pools the output of G to bias the output of P - let gpoolToBiasMul: SWMatMulLayerDesc - /// The batch normalization layer for P - let p1BN: SWBatchNormLayerDesc - /// The activation function for P - let p1Activation: ActivationKind - /// The 1x1 convolution layer with 2 channels for outputting two policy distributions - let p2Conv: SWConvLayerDesc - /// The fully connected linear layer for outputting logits for the pass move - let gpoolToPassMul: SWMatMulLayerDesc - /// The description of the bias layer that is applied to the output of the matrix multiplication layer for model version >= 15 - let gpoolToPassBias: SWMatBiasLayerDesc? 
- /// The activation function for the bias layer in model version >= 15 - let passActivation: ActivationKind? - /// The fully connected linear layer for outputting logits for the pass move in model version >= 15 - let gpoolToPassMul2: SWMatMulLayerDesc? - - /// Initializes a SWPolicyHeadDesc object with the given parameters - /// - Parameters: - /// - version: The version of the policy head - /// - p1Conv: The 1x1 convolution layer for P - /// - g1Conv: The 1x1 convolution layer for G - /// - g1BN: The batch normalization layer for G - /// - g1Activation: The activation function for G - /// - gpoolToBiasMul: The global pooling bias structure that pools the output of G to bias the output of P - /// - p1BN: The batch normalization layer for P - /// - p1Activation: The activation function for P - /// - p2Conv: The 1x1 convolution layer with 2 channels for outputting two policy distributions - /// - gpoolToPassMul: The fully connected linear layer for outputting logits for the pass move - init( - version: Int, - p1Conv: SWConvLayerDesc, - g1Conv: SWConvLayerDesc, - g1BN: SWBatchNormLayerDesc, - g1Activation: ActivationKind, - gpoolToBiasMul: SWMatMulLayerDesc, - p1BN: SWBatchNormLayerDesc, - p1Activation: ActivationKind, - p2Conv: SWConvLayerDesc, - gpoolToPassMul: SWMatMulLayerDesc, - gpoolToPassBias: SWMatBiasLayerDesc?, - passActivation: ActivationKind?, - gpoolToPassMul2: SWMatMulLayerDesc? 
- ) { - self.version = version - self.p1Conv = p1Conv - self.g1Conv = g1Conv - self.g1BN = g1BN - self.g1Activation = g1Activation - self.gpoolToBiasMul = gpoolToBiasMul - self.p1BN = p1BN - self.p1Activation = p1Activation - self.p2Conv = p2Conv - self.gpoolToPassMul = gpoolToPassMul - self.gpoolToPassBias = gpoolToPassBias - self.passActivation = passActivation - self.gpoolToPassMul2 = gpoolToPassMul2 - - assert( - (version >= 15) - || ((gpoolToPassBias == nil) && (passActivation == nil) && (gpoolToPassMul2 == nil)) - ) - assert( - (version < 15) - || ((gpoolToPassBias != nil) && (passActivation != nil) && (gpoolToPassMul2 != nil)) - ) +/// Delete the source .mlpackage after compilation +/// CoreML caches the compiled model, so the source is no longer needed +private func deleteSourceModel(at url: URL, serverThreadIdx: Int) { + do { + try FileManager.default.removeItem(at: url) + printError("Metal backend \(serverThreadIdx): Deleted temp model") + } catch { + printError("Metal backend \(serverThreadIdx): Warning: Failed to delete temp model: \(error)") } } -public func createSWPolicyHeadDesc( - version: Int32, - p1Conv: SWConvLayerDesc, - g1Conv: SWConvLayerDesc, - g1BN: SWBatchNormLayerDesc, - g1Activation: ActivationKind, - gpoolToBiasMul: SWMatMulLayerDesc, - p1BN: SWBatchNormLayerDesc, - p1Activation: ActivationKind, - p2Conv: SWConvLayerDesc, - gpoolToPassMul: SWMatMulLayerDesc, - gpoolToPassBias: SWMatBiasLayerDesc, - passActivation: ActivationKind, - gpoolToPassMul2: SWMatMulLayerDesc -) -> SWPolicyHeadDesc { - if version >= 15 { - return SWPolicyHeadDesc( - version: Int(version), - p1Conv: p1Conv, - g1Conv: g1Conv, - g1BN: g1BN, - g1Activation: g1Activation, - gpoolToBiasMul: gpoolToBiasMul, - p1BN: p1BN, - p1Activation: p1Activation, - p2Conv: p2Conv, - gpoolToPassMul: gpoolToPassMul, - gpoolToPassBias: gpoolToPassBias, - passActivation: passActivation, - gpoolToPassMul2: gpoolToPassMul2) - } else { - return SWPolicyHeadDesc( - version: Int(version), - 
p1Conv: p1Conv, - g1Conv: g1Conv, - g1BN: g1BN, - g1Activation: g1Activation, - gpoolToBiasMul: gpoolToBiasMul, - p1BN: p1BN, - p1Activation: p1Activation, - p2Conv: p2Conv, - gpoolToPassMul: gpoolToPassMul, - gpoolToPassBias: nil, - passActivation: nil, - gpoolToPassMul2: nil) +/// Create compute handle - loads pre-converted Core ML model +/// Model conversion is now handled in C++ using the native katagocoreml library +public func createCoreMLComputeHandle( + coremlModelPath: String, + serverThreadIdx: Int, + requireExactNNLen: Bool, + numInputChannels: Int32, + numInputGlobalChannels: Int32, + numInputMetaChannels: Int32, + numPolicyChannels: Int32, + numValueChannels: Int32, + numScoreValueChannels: Int32, + numOwnershipChannels: Int32, + context: MetalComputeContext +) -> CoreMLComputeHandle? { + + let optimizeMask = requireExactNNLen // When true: skips internal mask operations (~6.5% speedup) + let mlpackagePath = URL(fileURLWithPath: coremlModelPath) + + // Ensure temp file is deleted regardless of success/failure + defer { deleteSourceModel(at: mlpackagePath, serverThreadIdx: serverThreadIdx) } + + // Load Core ML model (already converted by C++ katagocoreml library) + do { + let config = MLModelConfiguration() + config.computeUnits = .cpuAndNeuralEngine // Exclude GPU for hybrid mode + + printError("Metal backend \(serverThreadIdx): Compiling model...") + let compiledURL = try MLModel.compileModel(at: mlpackagePath) + + printError("Metal backend \(serverThreadIdx): Loading compiled model...") + let model = try MLModel(contentsOf: compiledURL, configuration: config) + + printError("Metal backend \(serverThreadIdx): Model loaded successfully, \(context.nnXLen)x\(context.nnYLen)") + + return CoreMLComputeHandle( + model: model, + nnXLen: context.nnXLen, + nnYLen: context.nnYLen, + optimizeIdentityMask: optimizeMask, + numInputChannels: Int(numInputChannels), + numInputGlobalChannels: Int(numInputGlobalChannels), + numInputMetaChannels: 
Int(numInputMetaChannels), + numPolicyChannels: Int(numPolicyChannels), + numValueChannels: Int(numValueChannels), + numScoreValueChannels: Int(numScoreValueChannels), + numOwnershipChannels: Int(numOwnershipChannels) + ) + } catch { + printError("Metal backend: Failed to load model: \(error)") + return nil } } -/// A structure that represents a policy head of a neural network. -struct PolicyHead { - /// The tensor that holds the policy prediction of the neural network - let policyTensor: MPSGraphTensor - /// The tensor that holds the policy pass of the neural network - let policyPassTensor: MPSGraphTensor - - /// Initializes a PolicyHead object - /// - Parameters: - /// - graph: The MPSGraph object to which the policy head is added - /// - descriptor: The description of the policy head - /// - sourceTensor: The input tensor to the policy head - /// - maskTensor: The mask tensor for the input tensor - /// - maskSumTensor: The sum of the mask tensor - /// - maskSumSqrtS14M01Tensor: The square root of the sum of the mask tensor and a small epsilon - /// - nnXLen: The number of X pixels in the input tensor - /// - nnYLen: The number of Y pixels in the input tensor - init( - graph: MPSGraph, - descriptor: SWPolicyHeadDesc, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - nnXLen: NSNumber, - nnYLen: NSNumber - ) { - - let p1Conv = ConvLayer( - graph: graph, - sourceTensor: sourceTensor, - descriptor: descriptor.p1Conv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let g1Conv = ConvLayer( - graph: graph, - sourceTensor: sourceTensor, - descriptor: descriptor.g1Conv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let g1BN = BatchNormLayer( - graph: graph, - sourceTensor: g1Conv.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.g1BN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let g1Activation = ActivationLayer( - graph: graph, - sourceTensor: g1BN.resultTensor, - activationKind: 
descriptor.g1Activation) - - let g1Concat = GlobalPoolingLayer( - graph: graph, - sourceTensor: g1Activation.resultTensor, - maskTensor: maskTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: maskSumSqrtS14M01Tensor) - - assert(g1Concat.resultTensor.shape?[1] == descriptor.gpoolToBiasMul.inChannels) - - let gpoolToBiasMul = MatMulLayer( - graph: graph, - descriptor: descriptor.gpoolToBiasMul, - sourceTensor: g1Concat.resultTensor) - - let added = AddNCBiasLayer( - graph: graph, - sourceTensor: p1Conv.resultTensor, - biasTensor: gpoolToBiasMul.resultTensor, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.gpoolToBiasMul.outChannels) - - let p1BN = BatchNormLayer( - graph: graph, - sourceTensor: added.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.p1BN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let p1Activation = ActivationLayer( - graph: graph, - sourceTensor: p1BN.resultTensor, - activationKind: descriptor.p1Activation) - - let p2Conv = ConvLayer( - graph: graph, - sourceTensor: p1Activation.resultTensor, - descriptor: descriptor.p2Conv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - policyTensor = p2Conv.resultTensor - - assert(g1Concat.resultTensor.shape?[1] == descriptor.gpoolToPassMul.inChannels) +/// Print available Core ML compute units +public func printMetalDevices() { + printError("Metal backend: Hybrid mode - CoreML (CPU+ANE) + MPSGraph (GPU)") +} - let gpoolToPassMul = MatMulLayer( - graph: graph, - descriptor: descriptor.gpoolToPassMul, - sourceTensor: g1Concat.resultTensor) +// MARK: - Throughput Tracker for Adaptive Batch Sizing - if let gpoolToPassBias = descriptor.gpoolToPassBias, - let passActivation = descriptor.passActivation, - let gpoolToPassMul2 = descriptor.gpoolToPassMul2 - { - assert(descriptor.version >= 15) +/// Tracks throughput for CoreML and MPSGraph paths to adaptively adjust batch split ratio. 
+/// +/// # Thread Safety +/// +/// This class is thread-safe by design without requiring explicit locks: +/// +/// 1. **Single-Owner Access**: Each server thread owns its own `ComputeHandle` → +/// `HybridComputeHandle` → `ThroughputTracker` instance. There is no sharing +/// of `ThroughputTracker` instances between server threads. +/// +/// 2. **Disjoint Field Access**: Within a single `HybridComputeHandle.apply()` call, +/// concurrent dispatch queues access disjoint fields: +/// - `coremlQueue.async` calls `updateCoreML()` → writes `coreMLSamplesPerSec`, `totalCoreMLSamples` +/// - `mpsGraphQueue.async` calls `updateMPSGraph()` → writes `mpsGraphSamplesPerSec`, `totalMPSGraphSamples` +/// +/// Both read `warmupComplete`, `stableAlpha`, and `warmupAlpha`, but these are either +/// `let` constants or only written sequentially after `group.wait()`. +/// +/// 3. **Sequential Barrier**: `group.wait()` in `apply()` ensures all concurrent throughput +/// updates complete before `recordBatch()`, `shouldLogAndMark()`, or `getDiagnosticStats()` +/// are called. These methods run sequentially on the calling thread. +/// +/// Because of these invariants, no locks are needed. Removing `NSLock` was intentional +/// as it was unnecessary overhead given the access patterns above. 
+public class ThroughputTracker { + private var coreMLSamplesPerSec: Double = 0.9 // Warm-start: initial ratio ~0.47 (closer to optimal ~0.45) + private var mpsGraphSamplesPerSec: Double = 1.0 + + // Diagnostic fields + private var batchCount: Int = 0 + private var totalCoreMLSamples: Int = 0 + private var totalMPSGraphSamples: Int = 0 + private var ratioHistory: [Float] = [] + private let maxHistorySize = 100 // Keep last 100 ratios for analysis + private var lastLogBatchCount: Int = 0 + private let logInterval: Int = 50 // Log every N batches + + // Adaptive alpha parameters + private var warmupComplete: Bool = false + private let warmupAlpha: Double = 0.25 // Faster adaptation during warmup + private let stableAlpha: Double = 0.10 // Slower adaptation after convergence + private let warmupBatches: Int = 100 // Min batches before checking warmup transition + private let warmupVarianceThreshold: Double = 0.005 // Variance threshold for warmup completion + + /// Update CoreML throughput measurement with adaptive alpha + public func updateCoreML(samples: Int, duration: TimeInterval) { + guard duration > 0, samples > 0 else { return } + let newRate = Double(samples) / duration + let effectiveAlpha = warmupComplete ? stableAlpha : warmupAlpha + coreMLSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * coreMLSamplesPerSec + totalCoreMLSamples += samples + } - let gpoolToPassBiasLayer = MatBiasLayer( - graph: graph, - descriptor: gpoolToPassBias, - sourceTensor: gpoolToPassMul.resultTensor) + /// Update MPSGraph throughput measurement with adaptive alpha + public func updateMPSGraph(samples: Int, duration: TimeInterval) { + guard duration > 0, samples > 0 else { return } + let newRate = Double(samples) / duration + let effectiveAlpha = warmupComplete ? 
stableAlpha : warmupAlpha + mpsGraphSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * mpsGraphSamplesPerSec + totalMPSGraphSamples += samples + } - let passActivationLayer = ActivationLayer( - graph: graph, - sourceTensor: gpoolToPassBiasLayer.resultTensor, - activationKind: passActivation) + /// Get optimal CoreML ratio (0.0 to 1.0) based on measured throughput + public func getOptimalCoreMLRatio() -> Float { + let total = coreMLSamplesPerSec + mpsGraphSamplesPerSec + return total > 0 ? Float(coreMLSamplesPerSec / total) : 0.5 + } - let gpoolToPassMul2Layer = MatMulLayer( - graph: graph, - descriptor: gpoolToPassMul2, - sourceTensor: passActivationLayer.resultTensor) + /// Get current throughput stats for logging + public func getStats() -> (coreML: Double, mpsGraph: Double, ratio: Float) { + return (coreMLSamplesPerSec, mpsGraphSamplesPerSec, getOptimalCoreMLRatio()) + } - policyPassTensor = gpoolToPassMul2Layer.resultTensor - } else { - assert(descriptor.version < 15) - policyPassTensor = gpoolToPassMul.resultTensor + /// Record a batch for diagnostics (call after each apply) + public func recordBatch(ratio: Float) { + batchCount += 1 + if ratioHistory.count >= maxHistorySize { + ratioHistory.removeFirst() + } + ratioHistory.append(ratio) + // Check warmup transition + if !warmupComplete && batchCount >= warmupBatches && computeRatioVariance() < Float(warmupVarianceThreshold) { + warmupComplete = true } - - assert(policyTensor.shape?.count == 4) - assert(policyPassTensor.shape?.count == 2) } -} -/// A struct that describes the value head of a neural network -public struct SWValueHeadDesc { - /// The version of the value head - let version: Int - /// The description of the first convolutional layer in the value head - let v1Conv: SWConvLayerDesc - /// The description of the batch normalization layer after the first convolutional layer in the value head - let v1BN: SWBatchNormLayerDesc - /// The activation function that is applied after the first 
batch normalization layer in the value head - let v1Activation: ActivationKind - /// The description of the matrix multiplication layer that is applied to the output of the first convolutional layer in the value head - let v2Mul: SWMatMulLayerDesc - /// The description of the bias layer that is applied to the output of the matrix multiplication layer in the value head - let v2Bias: SWMatBiasLayerDesc - /// The activation function that is applied after the bias layer in the value head - let v2Activation: ActivationKind - /// The description of the matrix multiplication layer that is applied to the output of the bias layer in the value head - let v3Mul: SWMatMulLayerDesc - /// The description of the bias layer that is applied to the output of the matrix multiplication layer in the value head - let v3Bias: SWMatBiasLayerDesc - /// The description of the matrix multiplication layer that is applied to the output of the third bias layer in the value head - let sv3Mul: SWMatMulLayerDesc - /// The description of the bias layer that is applied to the output of the matrix multiplication layer in the value head - let sv3Bias: SWMatBiasLayerDesc - /// The description of the convolutional layer that is applied to the board ownership map in the value head - let vOwnershipConv: SWConvLayerDesc - - /// Initializes a SWValueHeadDesc object - /// - Parameters: - /// - version: The version of the value head - /// - v1Conv: The description of the first convolutional layer in the value head - /// - v1BN: The description of the batch normalization layer after the first convolutional layer in the value head - /// - v1Activation: The activation function that is applied after the first batch normalization layer in the value head - /// - v2Mul: The description of the matrix multiplication layer that is applied to the output of the first convolutional layer in the value head - /// - v2Bias: The description of the bias layer that is applied to the output of the matrix multiplication layer in 
the value head - /// - v2Activation: The activation function that is applied after the bias layer in the value head - /// - v3Mul: The description of the matrix multiplication layer that is applied to the output of the bias layer in the value head - /// - v3Bias: The description of the bias layer that is applied to the output of the matrix multiplication layer in the value head - /// - sv3Mul: The description of the matrix multiplication layer that is applied to the output of the third bias layer in the value head - /// - sv3Bias: The description of the bias layer that is applied to the output of the matrix multiplication layer in the value head - /// - vOwnershipConv: The description of the convolutional layer that is applied to the board ownership map in the value head - init( - version: Int, - v1Conv: SWConvLayerDesc, - v1BN: SWBatchNormLayerDesc, - v1Activation: ActivationKind, - v2Mul: SWMatMulLayerDesc, - v2Bias: SWMatBiasLayerDesc, - v2Activation: ActivationKind, - v3Mul: SWMatMulLayerDesc, - v3Bias: SWMatBiasLayerDesc, - sv3Mul: SWMatMulLayerDesc, - sv3Bias: SWMatBiasLayerDesc, - vOwnershipConv: SWConvLayerDesc - ) { - self.version = version - self.v1Conv = v1Conv - self.v1BN = v1BN - self.v1Activation = v1Activation - self.v2Mul = v2Mul - self.v2Bias = v2Bias - self.v2Activation = v2Activation - self.v3Mul = v3Mul - self.v3Bias = v3Bias - self.sv3Mul = sv3Mul - self.sv3Bias = sv3Bias - self.vOwnershipConv = vOwnershipConv + /// Check if logging should occur this batch, and if so, mark as logged + /// Returns true if logging should occur (atomically checks and marks) + public func shouldLogAndMark() -> Bool { + if batchCount - lastLogBatchCount >= logInterval { + lastLogBatchCount = batchCount + return true + } + return false } -} - -public func createSWValueHeadDesc( - version: Int32, - v1Conv: SWConvLayerDesc, - v1BN: SWBatchNormLayerDesc, - v1Activation: ActivationKind, - v2Mul: SWMatMulLayerDesc, - v2Bias: SWMatBiasLayerDesc, - v2Activation: 
ActivationKind, - v3Mul: SWMatMulLayerDesc, - v3Bias: SWMatBiasLayerDesc, - sv3Mul: SWMatMulLayerDesc, - sv3Bias: SWMatBiasLayerDesc, - vOwnershipConv: SWConvLayerDesc -) -> SWValueHeadDesc { - return SWValueHeadDesc( - version: Int(version), - v1Conv: v1Conv, - v1BN: v1BN, - v1Activation: v1Activation, - v2Mul: v2Mul, - v2Bias: v2Bias, - v2Activation: v2Activation, - v3Mul: v3Mul, - v3Bias: v3Bias, - sv3Mul: sv3Mul, - sv3Bias: sv3Bias, - vOwnershipConv: vOwnershipConv) -} -/// A structure that creates a value head for the neural network, which produces the value, score value, and ownership tensors. -struct ValueHead { - /// The tensor that represents the value of the board - let valueTensor: MPSGraphTensor - /// The tensor that represents the score value of the board - let scoreValueTensor: MPSGraphTensor - /// The tensor that represents the ownership of the board - let ownershipTensor: MPSGraphTensor - - /// Initializes the value head using a graph, a descriptor, a source tensor, and other relevant tensors. 
- /// - Parameters: - /// - graph: The graph used to perform calculations on tensors - /// - descriptor: The SWValueHeadDesc object that describes the value head - /// - sourceTensor: The tensor used to source data to the neural network - /// - maskTensor: The tensor used to mask out invalid moves - /// - maskSumTensor: The tensor used to sum up the mask tensor values - /// - maskSumSqrtS14M01Tensor: The tensor used to calculate a square root value - /// - maskSumSqrtS14M01SquareS01Tensor: The tensor used to calculate a square value - /// - nnXLen: The x-axis length of the neural network - /// - nnYLen: The y-axis length of the neural network - init( - graph: MPSGraph, - descriptor: SWValueHeadDesc, - sourceTensor: MPSGraphTensor, - maskTensor: MPSGraphTensor, - maskSumTensor: MPSGraphTensor, - maskSumSqrtS14M01Tensor: MPSGraphTensor, - maskSumSqrtS14M01SquareS01Tensor: MPSGraphTensor, - nnXLen: NSNumber, - nnYLen: NSNumber + /// Get diagnostic stats for logging + public func getDiagnosticStats() -> ( + batchCount: Int, + coreMLSamplesPerSec: Double, + mpsGraphSamplesPerSec: Double, + ratio: Float, + totalCoreMLSamples: Int, + totalMPSGraphSamples: Int, + ratioVariance: Float ) { + return ( + batchCount, + coreMLSamplesPerSec, + mpsGraphSamplesPerSec, + getOptimalCoreMLRatio(), + totalCoreMLSamples, + totalMPSGraphSamples, + computeRatioVariance() + ) + } - let v1Conv = ConvLayer( - graph: graph, - sourceTensor: sourceTensor, - descriptor: descriptor.v1Conv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let v1BN = BatchNormLayer( - graph: graph, - sourceTensor: v1Conv.resultTensor, - maskTensor: maskTensor, - descriptor: descriptor.v1BN, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let v1Activation = ActivationLayer( - graph: graph, - sourceTensor: v1BN.resultTensor, - activationKind: descriptor.v1Activation) - - let v1Mean = - GlobalPoolingValueLayer( - graph: graph, - sourceTensor: v1Activation.resultTensor, - maskSumTensor: maskSumTensor, - maskSumSqrtS14M01Tensor: 
maskSumSqrtS14M01Tensor, - maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01Tensor) - - assert(v1Mean.resultTensor.shape?[1] == descriptor.v2Mul.inChannels) - - let v2Mul = MatMulLayer( - graph: graph, - descriptor: descriptor.v2Mul, - sourceTensor: v1Mean.resultTensor) - - let v2Bias = MatBiasLayer( - graph: graph, - descriptor: descriptor.v2Bias, - sourceTensor: v2Mul.resultTensor) - - let v2Activation = ActivationLayer( - graph: graph, - sourceTensor: v2Bias.resultTensor, - activationKind: descriptor.v2Activation) - - let v3Mul = MatMulLayer( - graph: graph, - descriptor: descriptor.v3Mul, - sourceTensor: v2Activation.resultTensor) - - let v3Bias = MatBiasLayer( - graph: graph, - descriptor: descriptor.v3Bias, - sourceTensor: v3Mul.resultTensor) - - let sv3Mul = MatMulLayer( - graph: graph, - descriptor: descriptor.sv3Mul, - sourceTensor: v2Activation.resultTensor) - - let sv3Bias = MatBiasLayer( - graph: graph, - descriptor: descriptor.sv3Bias, - sourceTensor: sv3Mul.resultTensor) - - let vOwnershipConv = ConvLayer( - graph: graph, - sourceTensor: v1Activation.resultTensor, - descriptor: descriptor.vOwnershipConv, - nnXLen: nnXLen, - nnYLen: nnYLen) - - valueTensor = v3Bias.resultTensor - scoreValueTensor = sv3Bias.resultTensor - ownershipTensor = vOwnershipConv.resultTensor - - assert(valueTensor.shape?.count == 2) - assert(scoreValueTensor.shape?.count == 2) - assert(ownershipTensor.shape?.count == 4) + /// Compute variance of recent ratios + private func computeRatioVariance() -> Float { + guard ratioHistory.count >= 10 else { return 0.0 } + let recentRatios = Array(ratioHistory.suffix(20)) + let mean = recentRatios.reduce(0.0, +) / Float(recentRatios.count) + let variance = recentRatios.map { ($0 - mean) * ($0 - mean) }.reduce(0.0, +) / Float(recentRatios.count) + return variance } -} -/// A struct that describes a neural network model used for playing the game of Go. -public struct SWModelDesc { - /// The version of the model. 
- let version: Int - /// The name of the model. - let name: String - /// Number of channels for input features. - let numInputChannels: NSNumber - /// Number of channels for global input features. - let numInputGlobalChannels: NSNumber - /// Number of channels for meta input features. - let numInputMetaChannels: NSNumber - /// Number of channels for the value head output. - let numValueChannels: NSNumber - /// Number of channels for the score value head output. - let numScoreValueChannels: NSNumber - /// Number of channels for the ownership head output. - let numOwnershipChannels: NSNumber - /// The description of the trunk that makes up the backbone of the model. - let trunk: SWTrunkDesc - /// The description of the policy head that predicts the probability of playing at a particular position. - let policyHead: SWPolicyHeadDesc - /// The description of the value head that predicts the expected outcome of a game state. - let valueHead: SWValueHeadDesc - - /// Initializes an SWModelDesc object. - /// - Parameters: - /// - version: The version of the model. - /// - name: The name of the model. - /// - numInputChannels: Number of channels for input features. - /// - numInputGlobalChannels: Number of channels for global input features. - /// - numInputMetaChannels: Number of channels for meta input features. - /// - numValueChannels: Number of channels for the value head output. - /// - numScoreValueChannels: Number of channels for the score value head output. - /// - numOwnershipChannels: Number of channels for the ownership head output. - /// - trunk: The description of the trunk that makes up the backbone of the model. - /// - policyHead: The description of the policy head that predicts the probability of playing at a particular position. - /// - valueHead: The description of the value head that predicts the expected outcome of a game state. 
- init( - version: Int, - name: String, - numInputChannels: NSNumber, - numInputGlobalChannels: NSNumber, - numInputMetaChannels: NSNumber, - numValueChannels: NSNumber, - numScoreValueChannels: NSNumber, - numOwnershipChannels: NSNumber, - trunk: SWTrunkDesc, - policyHead: SWPolicyHeadDesc, - valueHead: SWValueHeadDesc - ) { - self.version = version - self.name = name - self.numInputChannels = numInputChannels - self.numInputGlobalChannels = numInputGlobalChannels - self.numInputMetaChannels = numInputMetaChannels - self.numValueChannels = numValueChannels - self.numScoreValueChannels = numScoreValueChannels - self.numOwnershipChannels = numOwnershipChannels - self.trunk = trunk - self.policyHead = policyHead - self.valueHead = valueHead + /// Check if ratio has converged (variance < threshold) + public func hasConverged(threshold: Float = 0.001) -> Bool { + let variance = computeRatioVariance() + return ratioHistory.count >= 20 && variance < threshold } } -public func createSWModelDesc( - version: Int32, - name: String, - numInputChannels: Int32, - numInputGlobalChannels: Int32, - numInputMetaChannels: Int32, - numValueChannels: Int32, - numScoreValueChannels: Int32, - numOwnershipChannels: Int32, - trunk: SWTrunkDesc, - policyHead: SWPolicyHeadDesc, - valueHead: SWValueHeadDesc -) -> SWModelDesc { - return SWModelDesc( - version: Int(version), - name: name, - numInputChannels: numInputChannels as NSNumber, - numInputGlobalChannels: numInputGlobalChannels as NSNumber, - numInputMetaChannels: numInputMetaChannels as NSNumber, - numValueChannels: numValueChannels as NSNumber, - numScoreValueChannels: numScoreValueChannels as NSNumber, - numOwnershipChannels: numOwnershipChannels as NSNumber, - trunk: trunk, - policyHead: policyHead, - valueHead: valueHead) -} +// MARK: - MPSGraph-based Model for GPU Inference -/// A structure representing a neural network model for processing Go game states. 
-struct Model { - /// The Metal device +/// GPU-based model using MPSGraph for inference +public class MPSGraphModelHandle { let device: MTLDevice - /// The command queue used to execute the graph on the GPU let commandQueue: MTLCommandQueue - /// The Metal Performance Shaders graph object used for building and executing the graph let graph: MPSGraph - /// The length of the neural network input in the x dimension - let nnXLen: NSNumber - /// The length of the neural network input in the y dimension - let nnYLen: NSNumber - /// The version of the model - let version: Int - /// The number of channels in the value output layer - let numValueChannels: NSNumber - /// The number of channels in the score value output layer - let numScoreValueChannels: NSNumber - /// The number of channels in the ownership output layer - let numOwnershipChannels: NSNumber - /// The input layer of the neural network + let nnXLen: Int32 + let nnYLen: Int32 + let numInputChannels: Int + let numInputGlobalChannels: Int + let numInputMetaChannels: Int + let numPolicyChannels: Int + let numValueChannels: Int + let numScoreValueChannels: Int + let numOwnershipChannels: Int + + // Layers let input: InputLayer - /// The global input layer of the neural network let inputGlobal: InputGlobalLayer - /// The meta input layer of the neural network let inputMeta: InputMetaLayer - /// The mask layer of the neural network let mask: MaskLayer - /// The trunk of the neural network let trunk: Trunk - /// The policy head of the neural network let policyHead: PolicyHead - /// The value head of the neural network let valueHead: ValueHead - /// The dictionary that maps the output tensors to the tensor data let targetTensors: [MPSGraphTensor] - /// Initializes a Model object. - /// - Parameters: - /// - device: The Metal device to use for computations. - /// - graph: The Metal Performance Shaders graph object used for building and executing the graph. - /// - descriptor: The description of the model. 
- /// - nnXLen: The length of the neural network input in the x dimension. - /// - nnYLen: The length of the neural network input in the y dimension. - init( - device: MTLDevice, - graph: MPSGraph, - descriptor: SWModelDesc, - nnXLen: NSNumber, - nnYLen: NSNumber + public init?( + modelDesc: SWModelDesc, + nnXLen: Int32, + nnYLen: Int32, + optimizeIdentityMask: Bool = false ) { + guard let device = MTLCreateSystemDefaultDevice() else { + printError("Metal backend: Failed to create Metal device") + return nil + } + self.device = device - self.commandQueue = device.makeCommandQueue()! - self.graph = graph + guard let queue = device.makeCommandQueue() else { + printError("Metal backend: Failed to create command queue") + return nil + } + self.commandQueue = queue + self.graph = MPSGraph() self.nnXLen = nnXLen self.nnYLen = nnYLen - self.version = descriptor.version - self.numValueChannels = descriptor.numValueChannels - self.numScoreValueChannels = descriptor.numScoreValueChannels - self.numOwnershipChannels = descriptor.numOwnershipChannels + self.numInputChannels = modelDesc.numInputChannels.intValue + self.numInputGlobalChannels = modelDesc.numInputGlobalChannels.intValue + self.numInputMetaChannels = modelDesc.numInputMetaChannels.intValue + self.numPolicyChannels = modelDesc.numPolicyChannels.intValue + self.numValueChannels = modelDesc.numValueChannels.intValue + self.numScoreValueChannels = modelDesc.numScoreValueChannels.intValue + self.numOwnershipChannels = modelDesc.numOwnershipChannels.intValue + + let nnXLenNS = nnXLen as NSNumber + let nnYLenNS = nnYLen as NSNumber input = InputLayer( graph: graph, - nnXLen: nnXLen, - nnYLen: nnYLen, - numChannels: descriptor.numInputChannels) + nnXLen: nnXLenNS, + nnYLen: nnYLenNS, + numChannels: modelDesc.numInputChannels) inputGlobal = InputGlobalLayer( graph: graph, - numGlobalFeatures: descriptor.numInputGlobalChannels) + numGlobalFeatures: modelDesc.numInputGlobalChannels) inputMeta = InputMetaLayer( graph: graph, 
- numMetaFeatures: descriptor.numInputMetaChannels) + numMetaFeatures: modelDesc.numInputMetaChannels) mask = MaskLayer( graph: graph, - nnXLen: nnXLen, - nnYLen: nnYLen) - - let maskSum = MaskSumLayer( - graph: graph, - maskTensor: mask.tensor) + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) - let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( - graph: graph, - maskSum: maskSum) + // Use constant tensors when mask is all 1s (requireExactNNLen=true) + let maskSum: MaskSumLayer + let maskSumSqrtS14M01: MaskSumSqrtS14M01Layer + let maskSumSqrtS14M01SquareS01: MaskSumSqrtS14M01SquareS01Layer - let maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( - graph: graph, - maskSumSqrtS14M01: maskSumSqrtS14M01) + if optimizeIdentityMask { + maskSum = MaskSumLayer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( + graph: graph, + nnXLen: nnXLenNS, + nnYLen: nnYLenNS) + } else { + maskSum = MaskSumLayer( + graph: graph, + maskTensor: mask.tensor) + maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer( + graph: graph, + maskSum: maskSum) + maskSumSqrtS14M01SquareS01 = MaskSumSqrtS14M01SquareS01Layer( + graph: graph, + maskSumSqrtS14M01: maskSumSqrtS14M01) + } trunk = Trunk( graph: graph, - descriptor: descriptor.trunk, + descriptor: modelDesc.trunk, inputTensor: input.tensor, inputGlobalTensor: inputGlobal.tensor, inputMetaTensor: inputMeta.tensor, maskTensor: mask.tensor, maskSumTensor: maskSum.tensor, maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, - nnXLen: nnXLen, - nnYLen: nnYLen) + nnXLen: nnXLenNS, + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) policyHead = PolicyHead( graph: graph, - descriptor: descriptor.policyHead, + descriptor: modelDesc.policyHead, sourceTensor: trunk.resultTensor, maskTensor: mask.tensor, maskSumTensor: maskSum.tensor, maskSumSqrtS14M01Tensor: 
maskSumSqrtS14M01.tensor, - nnXLen: nnXLen, - nnYLen: nnYLen) + nnXLen: nnXLenNS, + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) valueHead = ValueHead( graph: graph, - descriptor: descriptor.valueHead, + descriptor: modelDesc.valueHead, sourceTensor: trunk.resultTensor, maskTensor: mask.tensor, maskSumTensor: maskSum.tensor, maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, maskSumSqrtS14M01SquareS01Tensor: maskSumSqrtS14M01SquareS01.tensor, - nnXLen: nnXLen, - nnYLen: nnYLen) + nnXLen: nnXLenNS, + nnYLen: nnYLenNS, + optimizeIdentityMask: optimizeIdentityMask) targetTensors = [ policyHead.policyTensor, @@ -3035,20 +684,12 @@ struct Model { valueHead.scoreValueTensor, valueHead.ownershipTensor, ] + + printError("Metal backend: MPSGraph initialized on \(device.name)\(optimizeIdentityMask ? " (mask optimized)" : "")") } - /// Applies the model to the given input data, and generates predictions for policy, value and ownership - /// - Parameters: - /// - inputPointer: UnsafeMutablePointer to a flattened 2D array of floats representing the input state - /// - inputGlobalPointer: UnsafeMutablePointer to a flattened array of floats representing global state features - /// - inputMetaPointer: UnsafeMutablePointer to a flattened array of floats representing the metadata - /// - policy: UnsafeMutablePointer to a flattened 2D array of floats representing predicted policy - /// - policyPass: UnsafeMutablePointer to a flattened array of floats representing predicted probability of passing - /// - value: UnsafeMutablePointer to a flattened array of floats representing predicted value - /// - scoreValue: UnsafeMutablePointer to a flattened array of floats representing predicted score value - /// - ownership: UnsafeMutablePointer to a flattened 2D array of floats representing predicted ownership - /// - batchSize: The batch size - func apply( + /// Run inference on a batch using MPSGraph (GPU) + public func apply( input inputPointer: UnsafeMutablePointer, 
inputGlobal inputGlobalPointer: UnsafeMutablePointer, inputMeta inputMetaPointer: UnsafeMutablePointer, @@ -3059,15 +700,16 @@ struct Model { ownership: UnsafeMutablePointer, batchSize: Int ) { - let channelAxis = InputShape.getChannelAxis() let numInputChannels = input.shape[channelAxis] + let nnXLenNS = nnXLen as NSNumber + let nnYLenNS = nnYLen as NSNumber let inputShape = InputShape.create( batchSize: batchSize as NSNumber, numChannels: numInputChannels, - nnYLen: nnYLen, - nnXLen: nnXLen) + nnYLen: nnYLenNS, + nnXLen: nnXLenNS) let inputDescriptor = MPSNDArrayDescriptor( dataType: input.tensor.dataType, @@ -3118,8 +760,8 @@ struct Model { let maskShape = InputShape.create( batchSize: batchSize as NSNumber, numChannels: 1, - nnYLen: nnYLen, - nnXLen: nnXLen) + nnYLen: nnYLenNS, + nnXLen: nnXLenNS) let maskDescriptor = MPSNDArrayDescriptor( dataType: mask.tensor.dataType, @@ -3129,12 +771,12 @@ struct Model { device: device, descriptor: maskDescriptor) + // Extract mask from first channel of spatial input var maskStrideArray = [ MemoryLayout.size, - nnXLen.intValue * MemoryLayout.size, - nnYLen.intValue * nnXLen.intValue * MemoryLayout.size, - numInputChannels.intValue * nnYLen.intValue * nnXLen.intValue - * MemoryLayout.size, + Int(nnXLen) * MemoryLayout.size, + Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, + numInputChannels.intValue * Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, ] maskArray.writeBytes(inputPointer, strideBytes: &maskStrideArray) @@ -3152,12 +794,6 @@ struct Model { targetTensors: targetTensors, targetOperations: nil) - assert(fetch[policyHead.policyTensor] != nil) - assert(fetch[policyHead.policyPassTensor] != nil) - assert(fetch[valueHead.valueTensor] != nil) - assert(fetch[valueHead.scoreValueTensor] != nil) - assert(fetch[valueHead.ownershipTensor] != nil) - fetch[policyHead.policyTensor]?.mpsndarray().readBytes(policy) fetch[policyHead.policyPassTensor]?.mpsndarray().readBytes(policyPass) 
fetch[valueHead.valueTensor]?.mpsndarray().readBytes(value) @@ -3166,52 +802,48 @@ struct Model { } } -// A enum to represent enabled/disabled/auto option of a feature. -public enum SWEnable { - case False - case True - case Auto -} - -/// A class that represents context of GPU devices. -public class MetalComputeContext { - public let nnXLen: Int32 - public let nnYLen: Int32 +// MARK: - Hybrid Compute Handle - /// Initialize a context. - /// - Parameters: - /// - nnXLen: The width of the input tensor. - /// - nnYLen: The height of the input tensor. - init( - nnXLen: Int32, - nnYLen: Int32 - ) { - self.nnXLen = nnXLen - self.nnYLen = nnYLen +/// Global flag to enable/disable diagnostic logging (set via environment variable) +private let diagnosticLoggingEnabled: Bool = { + if let envValue = ProcessInfo.processInfo.environment["KATAGO_HYBRID_DIAG"] { + return envValue.lowercased() == "1" || envValue.lowercased() == "true" } -} - -public func createMetalComputeContext( - nnXLen: Int32, - nnYLen: Int32 -) -> MetalComputeContext { - return MetalComputeContext( - nnXLen: nnXLen, - nnYLen: nnYLen) -} - -/// A class that represents a handle of GPU device. 
-public class MetalComputeHandle { - let model: Model - - init(model: Model) { - self.model = model + return false +}() + +/// Hybrid compute handle that dispatches to both CoreML (CPU+ANE) and MPSGraph (GPU) +public class HybridComputeHandle { + let coremlHandle: CoreMLComputeHandle + let mpsGraphHandle: MPSGraphModelHandle + let throughputTracker: ThroughputTracker + let coremlQueue: DispatchQueue + let mpsGraphQueue: DispatchQueue + let nnXLen: Int32 + let nnYLen: Int32 + let serverThreadIdx: Int + + public init( + coremlHandle: CoreMLComputeHandle, + mpsGraphHandle: MPSGraphModelHandle, + serverThreadIdx: Int = 0 + ) { + self.coremlHandle = coremlHandle + self.mpsGraphHandle = mpsGraphHandle + self.serverThreadIdx = serverThreadIdx + self.throughputTracker = ThroughputTracker() + self.coremlQueue = DispatchQueue(label: "com.katago.coreml", qos: .userInitiated) + self.mpsGraphQueue = DispatchQueue(label: "com.katago.mpsgraph", qos: .userInitiated) + self.nnXLen = coremlHandle.nnXLen + self.nnYLen = coremlHandle.nnYLen } + /// Run hybrid inference - splits batch between CoreML and MPSGraph public func apply( - input inputPointer: UnsafeMutablePointer, - inputGlobal inputGlobalPointer: UnsafeMutablePointer, - inputMeta inputMetaPointer: UnsafeMutablePointer, + spatialInput: UnsafeMutablePointer, + globalInput: UnsafeMutablePointer, + metaInput: UnsafeMutablePointer, + maskInput: UnsafeMutablePointer, policy: UnsafeMutablePointer, policyPass: UnsafeMutablePointer, value: UnsafeMutablePointer, @@ -3219,48 +851,196 @@ public class MetalComputeHandle { ownership: UnsafeMutablePointer, batchSize: Int ) { - autoreleasepool { - model.apply( - input: inputPointer, - inputGlobal: inputGlobalPointer, - inputMeta: inputMetaPointer, - policy: policy, - policyPass: policyPass, - value: value, - scoreValue: scoreValue, - ownership: ownership, - batchSize: batchSize) + // Get optimal split ratio based on throughput + let ratio = throughputTracker.getOptimalCoreMLRatio() + // 
Prefer MPSGraph over CoreML for batch size 1, as MPSGraph is more stable + let coreMLBatchSize = max(0, min(batchSize - 1, Int(Float(batchSize) * ratio))) + let mpsGraphBatchSize = batchSize - coreMLBatchSize + + // Calculate buffer offsets + let spatialSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numInputChannels + let globalSize = coremlHandle.numInputGlobalChannels + let metaSize = coremlHandle.numInputMetaChannels + let policySize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numPolicyChannels + let policyPassSize = coremlHandle.numPolicyChannels // Non-spatial pass output + let valueSize = coremlHandle.numValueChannels + let scoreValueSize = coremlHandle.numScoreValueChannels + let ownershipSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numOwnershipChannels + + #if DEBUG + // Verify batch split ensures non-overlapping buffer access + // CoreML writes [0, coreMLBatchSize), MPSGraph writes [coreMLBatchSize, batchSize) + assert(coreMLBatchSize >= 0 && mpsGraphBatchSize >= 0, "Batch sizes must be non-negative") + assert(coreMLBatchSize + mpsGraphBatchSize == batchSize, "Batch split must sum to total") + #endif + + let group = DispatchGroup() + + // CoreML path (CPU + ANE) + if coreMLBatchSize > 0 { + group.enter() + coremlQueue.async { [self] in + let start = CFAbsoluteTimeGetCurrent() + + autoreleasepool { + coremlHandle.apply( + spatialInput: spatialInput, + globalInput: globalInput, + metaInput: metaInput, + maskInput: maskInput, + policy: policy, + policyPass: policyPass, + value: value, + scoreValue: scoreValue, + ownership: ownership, + batchSize: coreMLBatchSize + ) + } + + let duration = CFAbsoluteTimeGetCurrent() - start + throughputTracker.updateCoreML(samples: coreMLBatchSize, duration: duration) + group.leave() + } + } + + // MPSGraph path (GPU) + if mpsGraphBatchSize > 0 { + group.enter() + mpsGraphQueue.async { [self] in + let start = CFAbsoluteTimeGetCurrent() + + // Offset pointers for MPSGraph batch portion + let spatialOffset = coreMLBatchSize * 
spatialSize + let globalOffset = coreMLBatchSize * globalSize + let metaOffset = coreMLBatchSize * metaSize + let policyOffset = coreMLBatchSize * policySize + let policyPassOffset = coreMLBatchSize * policyPassSize + let valueOffset = coreMLBatchSize * valueSize + let scoreValueOffset = coreMLBatchSize * scoreValueSize + let ownershipOffset = coreMLBatchSize * ownershipSize + + autoreleasepool { + mpsGraphHandle.apply( + input: spatialInput.advanced(by: spatialOffset), + inputGlobal: globalInput.advanced(by: globalOffset), + inputMeta: metaInput.advanced(by: metaOffset), + policy: policy.advanced(by: policyOffset), + policyPass: policyPass.advanced(by: policyPassOffset), + value: value.advanced(by: valueOffset), + scoreValue: scoreValue.advanced(by: scoreValueOffset), + ownership: ownership.advanced(by: ownershipOffset), + batchSize: mpsGraphBatchSize + ) + } + + let duration = CFAbsoluteTimeGetCurrent() - start + throughputTracker.updateMPSGraph(samples: mpsGraphBatchSize, duration: duration) + group.leave() + } + } + + // Wait for both paths to complete + group.wait() + + // Record batch for diagnostics + throughputTracker.recordBatch(ratio: ratio) + + // Periodic diagnostic logging + if diagnosticLoggingEnabled && throughputTracker.shouldLogAndMark() { + let stats = throughputTracker.getDiagnosticStats() + let converged = throughputTracker.hasConverged() + print(String(format: "[HybridDiag T%d] batch=%d ratio=%.3f coreml=%.1f/s mps=%.1f/s total=%d/%d var=%.5f conv=%@", + serverThreadIdx, + stats.batchCount, + stats.ratio, + stats.coreMLSamplesPerSec, + stats.mpsGraphSamplesPerSec, + stats.totalCoreMLSamples, + stats.totalMPSGraphSamples, + stats.ratioVariance, + converged ? 
"yes" : "no")) } } } -public func maybeCreateMetalComputeHandle( - condition: Bool, - serverThreadIdx: Int = 0, - descriptor: SWModelDesc, +/// Create a hybrid compute handle +public func createHybridComputeHandle( + coremlModelPath: String, + modelDesc: SWModelDesc, + serverThreadIdx: Int, + requireExactNNLen: Bool, + numInputChannels: Int32, + numInputGlobalChannels: Int32, + numInputMetaChannels: Int32, + numPolicyChannels: Int32, + numValueChannels: Int32, + numScoreValueChannels: Int32, + numOwnershipChannels: Int32, context: MetalComputeContext -) -> MetalComputeHandle? { - guard condition else { return nil } +) -> HybridComputeHandle? { + + // Create CoreML handle (CPU + ANE) + guard let coremlHandle = createCoreMLComputeHandle( + coremlModelPath: coremlModelPath, + serverThreadIdx: serverThreadIdx, + requireExactNNLen: requireExactNNLen, + numInputChannels: numInputChannels, + numInputGlobalChannels: numInputGlobalChannels, + numInputMetaChannels: numInputMetaChannels, + numPolicyChannels: numPolicyChannels, + numValueChannels: numValueChannels, + numScoreValueChannels: numScoreValueChannels, + numOwnershipChannels: numOwnershipChannels, + context: context + ) else { + printError("Metal backend \(serverThreadIdx): Failed to create CoreML handle") + return nil + } - let device = MTLCreateSystemDefaultDevice()! 
+ // Create MPSGraph handle (GPU) + guard let mpsGraphHandle = MPSGraphModelHandle( + modelDesc: modelDesc, + nnXLen: context.nnXLen, + nnYLen: context.nnYLen, + optimizeIdentityMask: requireExactNNLen + ) else { + printError("Metal backend \(serverThreadIdx): Failed to create MPSGraph handle") + printError("Metal backend \(serverThreadIdx): CoreML handle will be released") + return nil + } - let model = Model( - device: device, - graph: MPSGraph(), - descriptor: descriptor, - nnXLen: context.nnXLen as NSNumber, - nnYLen: context.nnYLen as NSNumber) + printError("Metal backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") - let handle = MetalComputeHandle(model: model) + // Log if diagnostic mode is enabled + if diagnosticLoggingEnabled { + printError("Metal backend \(serverThreadIdx): Diagnostic logging enabled (KATAGO_HYBRID_DIAG=1)") + } - printError( - "Metal backend \(serverThreadIdx): \(device.name), Model version \(descriptor.version) \(descriptor.name), \(context.nnXLen)x\(context.nnYLen)" + return HybridComputeHandle( + coremlHandle: coremlHandle, + mpsGraphHandle: mpsGraphHandle, + serverThreadIdx: serverThreadIdx ) - - return handle } -public func printMetalDevices() { - let device = MTLCreateSystemDefaultDevice()! - printError("Found Metal Device: \(device.name)") +/// Create a GPU-only compute handle using MPSGraph +/// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE +public func createMPSGraphOnlyHandle( + modelDesc: SWModelDesc, + serverThreadIdx: Int, + requireExactNNLen: Bool, + context: MetalComputeContext +) -> MPSGraphModelHandle? 
{ + guard let mpsGraphHandle = MPSGraphModelHandle( + modelDesc: modelDesc, + nnXLen: context.nnXLen, + nnYLen: context.nnYLen, + optimizeIdentityMask: requireExactNNLen + ) else { + printError("Metal backend \(serverThreadIdx): Failed to create MPSGraph handle") + return nil + } + + printError("Metal backend \(serverThreadIdx): Initialized MPSGraph GPU-only mode") + return mpsGraphHandle } diff --git a/cpp/neuralnet/mpsgraphlayers.swift b/cpp/neuralnet/metallayers.swift similarity index 81% rename from cpp/neuralnet/mpsgraphlayers.swift rename to cpp/neuralnet/metallayers.swift index e99107438..9e3327c32 100644 --- a/cpp/neuralnet/mpsgraphlayers.swift +++ b/cpp/neuralnet/metallayers.swift @@ -2363,3 +2363,552 @@ struct MPSGraphModel { fetch[valueHead.ownershipTensor]?.mpsndarray().readBytes(ownership) } } + +// MARK: - Test Infrastructure + +/// Helper struct for testing individual network layers using MPSGraph +struct NetworkTester { + let device: MTLDevice + let commandQueue: MTLCommandQueue + let graph: MPSGraph + let inputTensor: MPSGraphTensor + let maskTensor: MPSGraphTensor + let outputTensor: MPSGraphTensor + let inputShape: [NSNumber] + let maskShape: [NSNumber] + let outputShape: [NSNumber] + + /// Initialize a network tester for testing a single layer + init( + device: MTLDevice, + graph: MPSGraph, + inputTensor: MPSGraphTensor, + maskTensor: MPSGraphTensor, + outputTensor: MPSGraphTensor, + batchSize: NSNumber, + nnXLen: NSNumber, + nnYLen: NSNumber, + inChannels: NSNumber, + outChannels: NSNumber + ) { + self.device = device + self.commandQueue = device.makeCommandQueue()! 
+ self.graph = graph + self.inputTensor = inputTensor + self.maskTensor = maskTensor + self.outputTensor = outputTensor + self.inputShape = InputShape.create( + batchSize: batchSize, + numChannels: inChannels, + nnYLen: nnYLen, + nnXLen: nnXLen) + self.maskShape = InputShape.create( + batchSize: batchSize, + numChannels: 1, + nnYLen: nnYLen, + nnXLen: nnXLen) + self.outputShape = InputShape.create( + batchSize: batchSize, + numChannels: outChannels, + nnYLen: nnYLen, + nnXLen: nnXLen) + } + + /// Run the test with given input and mask data, writing results to output + func run( + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer + ) { + let inputDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: inputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(UnsafeMutableRawPointer(mutating: inputPointer)) + + let maskDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: maskShape) + + let maskArray = MPSNDArray( + device: device, + descriptor: maskDescriptor) + + maskArray.writeBytes(UnsafeMutableRawPointer(mutating: maskPointer)) + + let feeds = [ + inputTensor: MPSGraphTensorData(inputArray), + maskTensor: MPSGraphTensorData(maskArray), + ] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: [outputTensor], + targetOperations: nil) + + fetch[outputTensor]?.mpsndarray().readBytes(outputPointer) + } +} + +// MARK: - ConvLayer Test Extension + +extension ConvLayer { + /// Test the convolution layer with given parameters + static func test( + descriptor: SWConvLayerDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer + ) -> Bool { + guard let device = MTLCreateSystemDefaultDevice() else { + return false + } + + let graph = MPSGraph() + + let inputShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: 
descriptor.inChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputTensor = graph.placeholder( + shape: inputShape, + dataType: .float32, + name: nil) + + let convLayer = ConvLayer( + graph: graph, + sourceTensor: inputTensor, + descriptor: descriptor, + nnXLen: nnXLen as NSNumber, + nnYLen: nnYLen as NSNumber) + + // Run the graph + let commandQueue = device.makeCommandQueue()! + + let actualInputShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: descriptor.inChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualInputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(UnsafeMutableRawPointer(mutating: inputPointer)) + + let feeds = [inputTensor: MPSGraphTensorData(inputArray)] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: [convLayer.resultTensor], + targetOperations: nil) + + fetch[convLayer.resultTensor]?.mpsndarray().readBytes(outputPointer) + + return true + } +} + +// MARK: - BatchNormLayer Test Extension + +extension BatchNormLayer { + /// Test the batch normalization layer with given parameters + static func test( + descriptor: SWBatchNormLayerDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer + ) -> Bool { + guard let device = MTLCreateSystemDefaultDevice() else { + return false + } + + let graph = MPSGraph() + + let inputShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: descriptor.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputTensor = graph.placeholder( + shape: inputShape, + dataType: .float32, + name: nil) + + let maskShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen 
as NSNumber) + + let maskTensor = graph.placeholder( + shape: maskShape, + dataType: .float32, + name: nil) + + let bnLayer = BatchNormLayer( + graph: graph, + sourceTensor: inputTensor, + maskTensor: maskTensor, + descriptor: descriptor, + nnXLen: nnXLen as NSNumber, + nnYLen: nnYLen as NSNumber) + + // Run the graph + let commandQueue = device.makeCommandQueue()! + + let actualInputShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: descriptor.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualInputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(UnsafeMutableRawPointer(mutating: inputPointer)) + + let actualMaskShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let maskDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualMaskShape) + + let maskArray = MPSNDArray( + device: device, + descriptor: maskDescriptor) + + maskArray.writeBytes(UnsafeMutableRawPointer(mutating: maskPointer)) + + let feeds = [ + inputTensor: MPSGraphTensorData(inputArray), + maskTensor: MPSGraphTensorData(maskArray), + ] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: [bnLayer.resultTensor], + targetOperations: nil) + + fetch[bnLayer.resultTensor]?.mpsndarray().readBytes(outputPointer) + + return true + } +} + +// MARK: - ResidualBlock Test Extension + +extension ResidualBlock { + /// Test the residual block with given parameters + static func test( + descriptor: SWResidualBlockDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer + ) -> Bool { + guard let device = MTLCreateSystemDefaultDevice() else { + return false + } + + let graph = MPSGraph() 
+ + let inputShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: descriptor.preBN.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputTensor = graph.placeholder( + shape: inputShape, + dataType: .float32, + name: nil) + + let maskShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let maskTensor = graph.placeholder( + shape: maskShape, + dataType: .float32, + name: nil) + + let resBlock = ResidualBlock( + graph: graph, + sourceTensor: inputTensor, + maskTensor: maskTensor, + descriptor: descriptor, + nnXLen: nnXLen as NSNumber, + nnYLen: nnYLen as NSNumber) + + // Run the graph + let commandQueue = device.makeCommandQueue()! + + let actualInputShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: descriptor.preBN.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualInputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(UnsafeMutableRawPointer(mutating: inputPointer)) + + let actualMaskShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let maskDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualMaskShape) + + let maskArray = MPSNDArray( + device: device, + descriptor: maskDescriptor) + + maskArray.writeBytes(UnsafeMutableRawPointer(mutating: maskPointer)) + + let feeds = [ + inputTensor: MPSGraphTensorData(inputArray), + maskTensor: MPSGraphTensorData(maskArray), + ] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: [resBlock.resultTensor], + targetOperations: nil) + + fetch[resBlock.resultTensor]?.mpsndarray().readBytes(outputPointer) + + return true + } +} + +// MARK: - 
GlobalPoolingResidualBlock Test Extension + +extension GlobalPoolingResidualBlock { + /// Test the global pooling residual block with given parameters + static func test( + descriptor: SWGlobalPoolingResidualBlockDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer + ) -> Bool { + guard let device = MTLCreateSystemDefaultDevice() else { + return false + } + + let graph = MPSGraph() + + let inputShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: descriptor.preBN.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputTensor = graph.placeholder( + shape: inputShape, + dataType: .float32, + name: nil) + + let maskShape = InputShape.create( + batchSize: -1 as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let maskTensor = graph.placeholder( + shape: maskShape, + dataType: .float32, + name: nil) + + // Compute mask sum and related tensors from mask + let maskSum = MaskSumLayer(graph: graph, maskTensor: maskTensor) + let maskSumSqrtS14M01 = MaskSumSqrtS14M01Layer(graph: graph, maskSum: maskSum) + + let gpoolBlock = GlobalPoolingResidualBlock( + graph: graph, + sourceTensor: inputTensor, + maskTensor: maskTensor, + maskSumTensor: maskSum.tensor, + maskSumSqrtS14M01Tensor: maskSumSqrtS14M01.tensor, + descriptor: descriptor, + nnXLen: nnXLen as NSNumber, + nnYLen: nnYLen as NSNumber) + + // Run the graph + let commandQueue = device.makeCommandQueue()! 
+ + let actualInputShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: descriptor.preBN.numChannels, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let inputDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualInputShape) + + let inputArray = MPSNDArray( + device: device, + descriptor: inputDescriptor) + + inputArray.writeBytes(UnsafeMutableRawPointer(mutating: inputPointer)) + + let actualMaskShape = InputShape.create( + batchSize: batchSize as NSNumber, + numChannels: 1, + nnYLen: nnYLen as NSNumber, + nnXLen: nnXLen as NSNumber) + + let maskDescriptor = MPSNDArrayDescriptor( + dataType: .float32, + shape: actualMaskShape) + + let maskArray = MPSNDArray( + device: device, + descriptor: maskDescriptor) + + maskArray.writeBytes(UnsafeMutableRawPointer(mutating: maskPointer)) + + let feeds = [ + inputTensor: MPSGraphTensorData(inputArray), + maskTensor: MPSGraphTensorData(maskArray), + ] + + let fetch = graph.run( + with: commandQueue, + feeds: feeds, + targetTensors: [gpoolBlock.resultTensor], + targetOperations: nil) + + fetch[gpoolBlock.resultTensor]?.mpsndarray().readBytes(outputPointer) + + return true + } +} + +// MARK: - Public Test Functions (callable from C++) + +/// Test the convolution layer +public func testConvLayer( + descriptor: SWConvLayerDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer +) -> Bool { + return ConvLayer.test( + descriptor: descriptor, + batchSize: batchSize, + nnXLen: nnXLen, + nnYLen: nnYLen, + inputPointer: inputPointer, + outputPointer: outputPointer) +} + +/// Test the batch normalization layer +public func testBatchNormLayer( + descriptor: SWBatchNormLayerDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer +) -> Bool { + return BatchNormLayer.test( + descriptor: descriptor, + batchSize: 
batchSize, + nnXLen: nnXLen, + nnYLen: nnYLen, + inputPointer: inputPointer, + maskPointer: maskPointer, + outputPointer: outputPointer) +} + +/// Test the residual block +public func testResidualBlock( + descriptor: SWResidualBlockDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer +) -> Bool { + return ResidualBlock.test( + descriptor: descriptor, + batchSize: batchSize, + nnXLen: nnXLen, + nnYLen: nnYLen, + inputPointer: inputPointer, + maskPointer: maskPointer, + outputPointer: outputPointer) +} + +/// Test the global pooling residual block +public func testGlobalPoolingResidualBlock( + descriptor: SWGlobalPoolingResidualBlockDesc, + batchSize: Int32, + nnXLen: Int32, + nnYLen: Int32, + inputPointer: UnsafePointer, + maskPointer: UnsafePointer, + outputPointer: UnsafeMutablePointer +) -> Bool { + return GlobalPoolingResidualBlock.test( + descriptor: descriptor, + batchSize: batchSize, + nnXLen: nnXLen, + nnYLen: nnYLen, + inputPointer: inputPointer, + maskPointer: maskPointer, + outputPointer: outputPointer) +} diff --git a/cpp/program/setup.cpp b/cpp/program/setup.cpp index e271b3321..a31295145 100644 --- a/cpp/program/setup.cpp +++ b/cpp/program/setup.cpp @@ -18,7 +18,6 @@ std::vector Setup::getBackendPrefixes() { prefixes.push_back("cuda"); prefixes.push_back("trt"); prefixes.push_back("metal"); - prefixes.push_back("coreml"); prefixes.push_back("opencl"); prefixes.push_back("eigen"); prefixes.push_back("dummybackend"); @@ -85,8 +84,6 @@ vector Setup::initializeNNEvaluators( string backendPrefix = "trt"; #elif defined(USE_METAL_BACKEND) string backendPrefix = "metal"; - #elif defined(USE_COREML_BACKEND) - string backendPrefix = "coreml"; #elif defined(USE_OPENCL_BACKEND) string backendPrefix = "opencl"; #elif defined(USE_EIGEN_BACKEND) From fc050b6a63226cacf54caee69cf0e2d9a3085010 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang 
<2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 23 Feb 2026 18:19:14 +0800 Subject: [PATCH 19/34] Replace hybrid batch splitting with per-thread GPU/ANE multiplexer Remove intra-batch HybridComputeHandle that split work between CoreML and MPSGraph within a single thread. Instead, each server thread now runs exclusively as GPU (MPSGraph, gpuIdx=0) or ANE (CoreML, gpuIdx=100), configured via metalDeviceToUseThread. This eliminates the ThroughputTracker, adaptive batch ratio, and dispatch queue complexity in favor of thread-level multiplexing managed by KataGo's existing multi-server-thread infrastructure. Co-Authored-By: Claude Opus 4.6 --- cpp/configs/analysis_example.cfg | 31 ++- cpp/configs/gtp_example.cfg | 32 ++- cpp/neuralnet/metalbackend.cpp | 93 +++----- cpp/neuralnet/metalbackend.h | 16 +- cpp/neuralnet/metalbackend.swift | 360 +------------------------------ 5 files changed, 86 insertions(+), 446 deletions(-) diff --git a/cpp/configs/analysis_example.cfg b/cpp/configs/analysis_example.cfg index edc5e8726..70d884520 100644 --- a/cpp/configs/analysis_example.cfg +++ b/cpp/configs/analysis_example.cfg @@ -224,15 +224,30 @@ nnRandomize = true # ------------------------------ # These only apply when using the METAL version of KataGo. -# For one Metal instance: KataGo will automatically use the default device. -# metalDeviceToUse = 0 - -# For two Metal instance: Uncomment these options, AND set numNNServerThreadsPerModel = 2 above. -# This will create two Metal instances, best overlapping the GPU and CPU execution. +# Metal backend dispatch is configured via numNNServerThreadsPerModel and metalDeviceToUseThread. +# Device index values: +# 0 = GPU only (MPSGraph) - default +# 100 = ANE only (CoreML, runs on CPU + Apple Neural Engine) +# +# Mux mode (recommended): 4 pipelined server threads (2x GPU + 2x ANE). +# Set nnMaxBatchSize to half of numSearchThreads for optimal pipelining. 
+# +# Example: mux mode (best throughput) +# numNNServerThreadsPerModel = 4 # metalDeviceToUseThread0 = 0 -# metalDeviceToUseThread1 = 1 - -# The pattern continues for additional Metal instances. +# metalDeviceToUseThread1 = 0 +# metalDeviceToUseThread2 = 100 +# metalDeviceToUseThread3 = 100 +# +# Example: GPU-only mode (default) +# numNNServerThreadsPerModel = 1 +# metalDeviceToUseThread0 = 0 +# +# Example: ANE-only mode +# numNNServerThreadsPerModel = 1 +# metalDeviceToUseThread0 = 100 +# +# Default (no config): 1 server thread, GPU-only mode (gpuIdx = 0). # OpenCL-specific GPU settings-------------------------------------- diff --git a/cpp/configs/gtp_example.cfg b/cpp/configs/gtp_example.cfg index cfa720bf3..b3266416a 100644 --- a/cpp/configs/gtp_example.cfg +++ b/cpp/configs/gtp_example.cfg @@ -460,15 +460,31 @@ searchFactorWhenWinningThreshold = 0.95 # ------------------------------ # These only apply when using the METAL version of KataGo. -# For one Metal instance: KataGo will automatically use the default device. -# metalDeviceToUse = 0 - -# For two Metal instance: Uncomment these options, AND set numNNServerThreadsPerModel = 2 above. -# This will create two Metal instances, best overlapping the GPU and CPU execution. +# Metal backend dispatch is configured via numNNServerThreadsPerModel and metalDeviceToUseThread. +# Device index values: +# 0 = GPU only (MPSGraph) - default +# 100 = ANE only (CoreML, runs on CPU + Apple Neural Engine) +# +# Mux mode (recommended): 4 pipelined server threads (2x GPU + 2x ANE). +# Set nnMaxBatchSize to half of numSearchThreads for optimal pipelining. +# Run "benchmark --sweep-backends" to find the best settings for your machine. +# +# Example: mux mode (best throughput) +# numNNServerThreadsPerModel = 4 # metalDeviceToUseThread0 = 0 -# metalDeviceToUseThread1 = 1 - -# The pattern continues for additional Metal instances. 
+# metalDeviceToUseThread1 = 0 +# metalDeviceToUseThread2 = 100 +# metalDeviceToUseThread3 = 100 +# +# Example: GPU-only mode (default) +# numNNServerThreadsPerModel = 1 +# metalDeviceToUseThread0 = 0 +# +# Example: ANE-only mode +# numNNServerThreadsPerModel = 1 +# metalDeviceToUseThread0 = 100 +# +# Default (no config): 1 server thread, GPU-only mode (gpuIdx = 0). # ------------------------------ # OpenCL GPU settings diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index 76e236537..a35bc92ce 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -21,13 +21,6 @@ using namespace std; namespace gfs = ghc::filesystem; -// Minimum batch sizes for hybrid execution mode. -// Hybrid splits batches between CoreML (CPU+ANE) and MPSGraph (GPU). -// When batch is too small to split, prefer MPSGraph-only for stability: -// MPSGraph has more predictable latency and avoids CoreML dispatch overhead. -static constexpr int MIN_COREML_BATCH = 1; -static constexpr int MIN_MPSGRAPH_BATCH = 1; - namespace CoreMLConversion { // Get temp directory for model conversion @@ -448,9 +441,8 @@ void NeuralNet::freeComputeContext(ComputeContext* computeContext) { static mutex computeHandleMutex; -// Helper function to convert model and create hybrid compute handle -// This is needed because Swift Optional doesn't support assignment in C++ -static swift::Optional convertAndCreateHybridHandle( +// Helper function to convert model and create CoreML-only compute handle (for mux ANE thread) +static swift::Optional convertAndCreateCoreMLOnlyHandle( ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, @@ -464,7 +456,6 @@ static swift::Optional convertAndCreateHybridH bool optimizeMask = requireExactNNLen; // Convert model to CoreML format in temp directory - // The Swift side will delete the temp file after loading string coremlModelPath = CoreMLConversion::convertModelToTemp( loadedModel->modelPath, nnXLen, @@ 
-475,13 +466,9 @@ static swift::Optional convertAndCreateHybridH serverThreadIdx ); - // Convert model descriptor to Swift format for MPSGraph path - SWModelDesc swModelDesc = MetalProcess::modelDescToSwift(&loadedModel->modelDesc); - - // Create hybrid compute handle (CoreML on CPU+ANE, MPSGraph on GPU) - return createHybridComputeHandle( + // Create CoreML-only compute handle (CPU+ANE) + return createCoreMLComputeHandle( swift::String(coremlModelPath), - swModelDesc, serverThreadIdx, requireExactNNLen, loadedModel->modelDesc.numInputChannels, @@ -495,62 +482,42 @@ static swift::Optional convertAndCreateHybridH ); } -// Helper function to create hybrid handle if FP16 mode with sufficient batch size, otherwise returns none -static swift::Optional createHybridHandleIfNeeded( +// Helper function to create CoreML-only handle when gpuIdx == METAL_MUX_ANE +static swift::Optional createCoreMLOnlyHandleIfNeeded( ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, int maxBatchSize, + int gpuIdx, int serverThreadIdx ) { - if(context->useFP16Mode == enabled_t::False) { - // FP32 mode - don't create hybrid handle - return swift::Optional::none(); - } - - // Hybrid mode splits batches: CoreML takes max(1, ...), MPSGraph takes remainder - // Minimum samples for meaningful split = 1 (CoreML) + 1 (MPSGraph) = 2 - // If batch can't be split, prefer MPSGraph-only for stability - if(maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH) { - return swift::Optional::none(); + if(gpuIdx != METAL_MUX_ANE) { + return swift::Optional::none(); } - // FP16 mode with sufficient batch size: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) - return convertAndCreateHybridHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); + cerr << "Metal backend " << serverThreadIdx << ": Mux ANE mode - using CoreML (CPU+ANE)" << endl; + return convertAndCreateCoreMLOnlyHandle(context, loadedModel, requireExactNNLen, maxBatchSize, 
serverThreadIdx); } -// Helper function to create MPSGraph-only handle when needed -// Used when: (1) useFP16=false to avoid slow FP32 CoreML, or (2) batch too small for hybrid split +// Helper function to create MPSGraph-only handle for all non-ANE modes static swift::Optional createMPSGraphHandleIfNeeded( ComputeContext* context, const LoadedModel* loadedModel, bool requireExactNNLen, int maxBatchSize, + int gpuIdx, int serverThreadIdx ) { - // Use MPSGraph-only when: - // 1. FP32 mode (CoreML FP32 on CPU+ANE is slow), OR - // 2. Batch too small to split (hybrid requires minCoreML + minMPSGraph samples) - bool batchTooSmallForHybrid = maxBatchSize < MIN_COREML_BATCH + MIN_MPSGRAPH_BATCH; + (void)maxBatchSize; - if(context->useFP16Mode != enabled_t::False && !batchTooSmallForHybrid) { - // FP16 mode with sufficient batch - hybrid handle will be created instead + // Skip if this is an ANE thread - CoreML-only handle will be created instead + if(gpuIdx == METAL_MUX_ANE) { return swift::Optional::none(); } - // Log reason for MPSGraph-only mode - if(batchTooSmallForHybrid) { - cerr << "Metal backend " << serverThreadIdx << ": Batch size " << maxBatchSize - << " too small for hybrid split - using MPSGraph GPU-only" << endl; - } else { - cerr << "Metal backend " << serverThreadIdx << ": FP32 mode - using MPSGraph GPU-only (skipping CoreML converter)" << endl; - } + cerr << "Metal backend " << serverThreadIdx << ": GPU mode - using MPSGraph (GPU)" << endl; - // Convert model descriptor to Swift format for MPSGraph path - // Note: No CoreML conversion needed - MPSGraph reads weights directly SWModelDesc swModelDesc = MetalProcess::modelDescToSwift(&loadedModel->modelDesc); - - // Create MPSGraph-only handle (GPU only) return createMPSGraphOnlyHandle( swModelDesc, serverThreadIdx, @@ -567,15 +534,12 @@ ComputeHandle::ComputeHandle( int serverThreadIdx, bool requireExactNNLen, int maxBatchSize): -hybridHandle(createHybridHandleIfNeeded(context, loadedModel, 
requireExactNNLen, maxBatchSize, serverThreadIdx)), -mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx)) { - bool hasHybrid = static_cast(hybridHandle); +mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, gpuIdx, serverThreadIdx)), +coremlOnlyHandle(createCoreMLOnlyHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, gpuIdx, serverThreadIdx)) { bool hasMPSGraph = static_cast(mpsGraphOnlyHandle); - if(hasHybrid && hasMPSGraph) { - throw runtime_error("Metal backend: Logic error - both hybridHandle and mpsGraphOnlyHandle are valid"); - } - if(!hasHybrid && !hasMPSGraph) { - throw runtime_error("Metal backend: Failed to create compute handle - both CoreML and MPSGraph initialization failed (check logs above for details)"); + bool hasCoreML = static_cast(coremlOnlyHandle); + if(hasMPSGraph + hasCoreML != 1) { + throw runtime_error("Metal backend: Logic error - expected exactly one compute handle, got " + to_string(hasMPSGraph + hasCoreML)); } const ModelDesc* modelDesc = &loadedModel->modelDesc; @@ -987,15 +951,13 @@ void MetalProcess::getMetalOutput( } // Dispatch to appropriate handle based on mode - if(gpuHandle->hybridHandle) { - // FP16 mode: Use hybrid execution (CoreML on CPU+ANE, MPSGraph on GPU) - // Mask buffer has correct stride (singleMaskElts = H*W per batch element) - // When requireExactNNLen is true, mask operations can be optimized (optimize_identity_mask) - gpuHandle->hybridHandle.get().apply( + if(gpuHandle->coremlOnlyHandle) { + // ANE mode: Use CoreML (CPU+ANE) + gpuHandle->coremlOnlyHandle.get().apply( inputBuffers->userInputBuffer, inputBuffers->userInputGlobalBuffer, inputBuffers->userInputMetaBuffer, - inputBuffers->userInputMaskBuffer, // Dedicated mask buffer with correct stride + inputBuffers->userInputMaskBuffer, inputBuffers->policyResults, inputBuffers->policyPassResults, inputBuffers->valueResults, @@ 
-1003,8 +965,7 @@ void MetalProcess::getMetalOutput( inputBuffers->ownershipResults, batchSize); } else if(gpuHandle->mpsGraphOnlyHandle) { - // FP32 mode: Use MPSGraph only (GPU-only) - // Mask is extracted internally from channel 0 of spatial input via strided reads + // GPU mode: Use MPSGraph (GPU) gpuHandle->mpsGraphOnlyHandle.get().apply( inputBuffers->userInputBuffer, inputBuffers->userInputGlobalBuffer, diff --git a/cpp/neuralnet/metalbackend.h b/cpp/neuralnet/metalbackend.h index 12cc6b0c0..e10b8547a 100644 --- a/cpp/neuralnet/metalbackend.h +++ b/cpp/neuralnet/metalbackend.h @@ -15,6 +15,12 @@ using namespace std; using namespace KataGoSwift; +// Backend mode constants for multiplexer architecture. +// When used as gpuIdx, these select a specific backend for that server thread. +// Default gpuIdx=-1 maps to 0 (GPU-only) in createComputeHandle. +static constexpr int METAL_MUX_GPU = 0; // MPSGraph-only (GPU) - default +static constexpr int METAL_MUX_ANE = 100; // CoreML-only (CPU+ANE) + namespace MetalProcess { void copyRowData(float* dest, const float* src, size_t numElements); @@ -168,16 +174,14 @@ struct ComputeHandle { bool requireExactNNLen; /** - * @brief The hybrid compute handle instance from Swift. - * This handle dispatches work to both CoreML (CPU+ANE) and MPSGraph (GPU). + * @brief The MPSGraph-only handle instance from Swift (GPU-only mode). */ - swift::Optional hybridHandle; + swift::Optional mpsGraphOnlyHandle; /** - * @brief The MPSGraph-only handle instance from Swift (used for FP32 mode). - * This handle dispatches work only to GPU, avoiding slow FP32 CPU+ANE execution. + * @brief The CoreML-only handle instance from Swift (ANE mode). */ - swift::Optional mpsGraphOnlyHandle; + swift::Optional coremlOnlyHandle; /** * @brief Construct a new ComputeHandle object. 
diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index 8f209d054..64b72bea6 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -394,144 +394,9 @@ public func createCoreMLComputeHandle( } } -/// Print available Core ML compute units +/// Print available Metal compute devices public func printMetalDevices() { - printError("Metal backend: Hybrid mode - CoreML (CPU+ANE) + MPSGraph (GPU)") -} - -// MARK: - Throughput Tracker for Adaptive Batch Sizing - -/// Tracks throughput for CoreML and MPSGraph paths to adaptively adjust batch split ratio. -/// -/// # Thread Safety -/// -/// This class is thread-safe by design without requiring explicit locks: -/// -/// 1. **Single-Owner Access**: Each server thread owns its own `ComputeHandle` → -/// `HybridComputeHandle` → `ThroughputTracker` instance. There is no sharing -/// of `ThroughputTracker` instances between server threads. -/// -/// 2. **Disjoint Field Access**: Within a single `HybridComputeHandle.apply()` call, -/// concurrent dispatch queues access disjoint fields: -/// - `coremlQueue.async` calls `updateCoreML()` → writes `coreMLSamplesPerSec`, `totalCoreMLSamples` -/// - `mpsGraphQueue.async` calls `updateMPSGraph()` → writes `mpsGraphSamplesPerSec`, `totalMPSGraphSamples` -/// -/// Both read `warmupComplete`, `stableAlpha`, and `warmupAlpha`, but these are either -/// `let` constants or only written sequentially after `group.wait()`. -/// -/// 3. **Sequential Barrier**: `group.wait()` in `apply()` ensures all concurrent throughput -/// updates complete before `recordBatch()`, `shouldLogAndMark()`, or `getDiagnosticStats()` -/// are called. These methods run sequentially on the calling thread. -/// -/// Because of these invariants, no locks are needed. Removing `NSLock` was intentional -/// as it was unnecessary overhead given the access patterns above. 
-public class ThroughputTracker { - private var coreMLSamplesPerSec: Double = 0.9 // Warm-start: initial ratio ~0.47 (closer to optimal ~0.45) - private var mpsGraphSamplesPerSec: Double = 1.0 - - // Diagnostic fields - private var batchCount: Int = 0 - private var totalCoreMLSamples: Int = 0 - private var totalMPSGraphSamples: Int = 0 - private var ratioHistory: [Float] = [] - private let maxHistorySize = 100 // Keep last 100 ratios for analysis - private var lastLogBatchCount: Int = 0 - private let logInterval: Int = 50 // Log every N batches - - // Adaptive alpha parameters - private var warmupComplete: Bool = false - private let warmupAlpha: Double = 0.25 // Faster adaptation during warmup - private let stableAlpha: Double = 0.10 // Slower adaptation after convergence - private let warmupBatches: Int = 100 // Min batches before checking warmup transition - private let warmupVarianceThreshold: Double = 0.005 // Variance threshold for warmup completion - - /// Update CoreML throughput measurement with adaptive alpha - public func updateCoreML(samples: Int, duration: TimeInterval) { - guard duration > 0, samples > 0 else { return } - let newRate = Double(samples) / duration - let effectiveAlpha = warmupComplete ? stableAlpha : warmupAlpha - coreMLSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * coreMLSamplesPerSec - totalCoreMLSamples += samples - } - - /// Update MPSGraph throughput measurement with adaptive alpha - public func updateMPSGraph(samples: Int, duration: TimeInterval) { - guard duration > 0, samples > 0 else { return } - let newRate = Double(samples) / duration - let effectiveAlpha = warmupComplete ? 
stableAlpha : warmupAlpha - mpsGraphSamplesPerSec = effectiveAlpha * newRate + (1 - effectiveAlpha) * mpsGraphSamplesPerSec - totalMPSGraphSamples += samples - } - - /// Get optimal CoreML ratio (0.0 to 1.0) based on measured throughput - public func getOptimalCoreMLRatio() -> Float { - let total = coreMLSamplesPerSec + mpsGraphSamplesPerSec - return total > 0 ? Float(coreMLSamplesPerSec / total) : 0.5 - } - - /// Get current throughput stats for logging - public func getStats() -> (coreML: Double, mpsGraph: Double, ratio: Float) { - return (coreMLSamplesPerSec, mpsGraphSamplesPerSec, getOptimalCoreMLRatio()) - } - - /// Record a batch for diagnostics (call after each apply) - public func recordBatch(ratio: Float) { - batchCount += 1 - if ratioHistory.count >= maxHistorySize { - ratioHistory.removeFirst() - } - ratioHistory.append(ratio) - // Check warmup transition - if !warmupComplete && batchCount >= warmupBatches && computeRatioVariance() < Float(warmupVarianceThreshold) { - warmupComplete = true - } - } - - /// Check if logging should occur this batch, and if so, mark as logged - /// Returns true if logging should occur (atomically checks and marks) - public func shouldLogAndMark() -> Bool { - if batchCount - lastLogBatchCount >= logInterval { - lastLogBatchCount = batchCount - return true - } - return false - } - - /// Get diagnostic stats for logging - public func getDiagnosticStats() -> ( - batchCount: Int, - coreMLSamplesPerSec: Double, - mpsGraphSamplesPerSec: Double, - ratio: Float, - totalCoreMLSamples: Int, - totalMPSGraphSamples: Int, - ratioVariance: Float - ) { - return ( - batchCount, - coreMLSamplesPerSec, - mpsGraphSamplesPerSec, - getOptimalCoreMLRatio(), - totalCoreMLSamples, - totalMPSGraphSamples, - computeRatioVariance() - ) - } - - /// Compute variance of recent ratios - private func computeRatioVariance() -> Float { - guard ratioHistory.count >= 10 else { return 0.0 } - let recentRatios = Array(ratioHistory.suffix(20)) - let mean = 
recentRatios.reduce(0.0, +) / Float(recentRatios.count) - let variance = recentRatios.map { ($0 - mean) * ($0 - mean) }.reduce(0.0, +) / Float(recentRatios.count) - return variance - } - - /// Check if ratio has converged (variance < threshold) - public func hasConverged(threshold: Float = 0.001) -> Bool { - let variance = computeRatioVariance() - return ratioHistory.count >= 20 && variance < threshold - } + printError("Metal backend: GPU-only mode (MPSGraph)") } // MARK: - MPSGraph-based Model for GPU Inference @@ -802,227 +667,6 @@ public class MPSGraphModelHandle { } } -// MARK: - Hybrid Compute Handle - -/// Global flag to enable/disable diagnostic logging (set via environment variable) -private let diagnosticLoggingEnabled: Bool = { - if let envValue = ProcessInfo.processInfo.environment["KATAGO_HYBRID_DIAG"] { - return envValue.lowercased() == "1" || envValue.lowercased() == "true" - } - return false -}() - -/// Hybrid compute handle that dispatches to both CoreML (CPU+ANE) and MPSGraph (GPU) -public class HybridComputeHandle { - let coremlHandle: CoreMLComputeHandle - let mpsGraphHandle: MPSGraphModelHandle - let throughputTracker: ThroughputTracker - let coremlQueue: DispatchQueue - let mpsGraphQueue: DispatchQueue - let nnXLen: Int32 - let nnYLen: Int32 - let serverThreadIdx: Int - - public init( - coremlHandle: CoreMLComputeHandle, - mpsGraphHandle: MPSGraphModelHandle, - serverThreadIdx: Int = 0 - ) { - self.coremlHandle = coremlHandle - self.mpsGraphHandle = mpsGraphHandle - self.serverThreadIdx = serverThreadIdx - self.throughputTracker = ThroughputTracker() - self.coremlQueue = DispatchQueue(label: "com.katago.coreml", qos: .userInitiated) - self.mpsGraphQueue = DispatchQueue(label: "com.katago.mpsgraph", qos: .userInitiated) - self.nnXLen = coremlHandle.nnXLen - self.nnYLen = coremlHandle.nnYLen - } - - /// Run hybrid inference - splits batch between CoreML and MPSGraph - public func apply( - spatialInput: UnsafeMutablePointer, - globalInput: 
UnsafeMutablePointer, - metaInput: UnsafeMutablePointer, - maskInput: UnsafeMutablePointer, - policy: UnsafeMutablePointer, - policyPass: UnsafeMutablePointer, - value: UnsafeMutablePointer, - scoreValue: UnsafeMutablePointer, - ownership: UnsafeMutablePointer, - batchSize: Int - ) { - // Get optimal split ratio based on throughput - let ratio = throughputTracker.getOptimalCoreMLRatio() - // Prefer MPSGraph over CoreML for batch size 1, as MPSGraph is more stable - let coreMLBatchSize = max(0, min(batchSize - 1, Int(Float(batchSize) * ratio))) - let mpsGraphBatchSize = batchSize - coreMLBatchSize - - // Calculate buffer offsets - let spatialSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numInputChannels - let globalSize = coremlHandle.numInputGlobalChannels - let metaSize = coremlHandle.numInputMetaChannels - let policySize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numPolicyChannels - let policyPassSize = coremlHandle.numPolicyChannels // Non-spatial pass output - let valueSize = coremlHandle.numValueChannels - let scoreValueSize = coremlHandle.numScoreValueChannels - let ownershipSize = Int(nnXLen) * Int(nnYLen) * coremlHandle.numOwnershipChannels - - #if DEBUG - // Verify batch split ensures non-overlapping buffer access - // CoreML writes [0, coreMLBatchSize), MPSGraph writes [coreMLBatchSize, batchSize) - assert(coreMLBatchSize >= 0 && mpsGraphBatchSize >= 0, "Batch sizes must be non-negative") - assert(coreMLBatchSize + mpsGraphBatchSize == batchSize, "Batch split must sum to total") - #endif - - let group = DispatchGroup() - - // CoreML path (CPU + ANE) - if coreMLBatchSize > 0 { - group.enter() - coremlQueue.async { [self] in - let start = CFAbsoluteTimeGetCurrent() - - autoreleasepool { - coremlHandle.apply( - spatialInput: spatialInput, - globalInput: globalInput, - metaInput: metaInput, - maskInput: maskInput, - policy: policy, - policyPass: policyPass, - value: value, - scoreValue: scoreValue, - ownership: ownership, - batchSize: coreMLBatchSize - ) - 
} - - let duration = CFAbsoluteTimeGetCurrent() - start - throughputTracker.updateCoreML(samples: coreMLBatchSize, duration: duration) - group.leave() - } - } - - // MPSGraph path (GPU) - if mpsGraphBatchSize > 0 { - group.enter() - mpsGraphQueue.async { [self] in - let start = CFAbsoluteTimeGetCurrent() - - // Offset pointers for MPSGraph batch portion - let spatialOffset = coreMLBatchSize * spatialSize - let globalOffset = coreMLBatchSize * globalSize - let metaOffset = coreMLBatchSize * metaSize - let policyOffset = coreMLBatchSize * policySize - let policyPassOffset = coreMLBatchSize * policyPassSize - let valueOffset = coreMLBatchSize * valueSize - let scoreValueOffset = coreMLBatchSize * scoreValueSize - let ownershipOffset = coreMLBatchSize * ownershipSize - - autoreleasepool { - mpsGraphHandle.apply( - input: spatialInput.advanced(by: spatialOffset), - inputGlobal: globalInput.advanced(by: globalOffset), - inputMeta: metaInput.advanced(by: metaOffset), - policy: policy.advanced(by: policyOffset), - policyPass: policyPass.advanced(by: policyPassOffset), - value: value.advanced(by: valueOffset), - scoreValue: scoreValue.advanced(by: scoreValueOffset), - ownership: ownership.advanced(by: ownershipOffset), - batchSize: mpsGraphBatchSize - ) - } - - let duration = CFAbsoluteTimeGetCurrent() - start - throughputTracker.updateMPSGraph(samples: mpsGraphBatchSize, duration: duration) - group.leave() - } - } - - // Wait for both paths to complete - group.wait() - - // Record batch for diagnostics - throughputTracker.recordBatch(ratio: ratio) - - // Periodic diagnostic logging - if diagnosticLoggingEnabled && throughputTracker.shouldLogAndMark() { - let stats = throughputTracker.getDiagnosticStats() - let converged = throughputTracker.hasConverged() - print(String(format: "[HybridDiag T%d] batch=%d ratio=%.3f coreml=%.1f/s mps=%.1f/s total=%d/%d var=%.5f conv=%@", - serverThreadIdx, - stats.batchCount, - stats.ratio, - stats.coreMLSamplesPerSec, - 
stats.mpsGraphSamplesPerSec, - stats.totalCoreMLSamples, - stats.totalMPSGraphSamples, - stats.ratioVariance, - converged ? "yes" : "no")) - } - } -} - -/// Create a hybrid compute handle -public func createHybridComputeHandle( - coremlModelPath: String, - modelDesc: SWModelDesc, - serverThreadIdx: Int, - requireExactNNLen: Bool, - numInputChannels: Int32, - numInputGlobalChannels: Int32, - numInputMetaChannels: Int32, - numPolicyChannels: Int32, - numValueChannels: Int32, - numScoreValueChannels: Int32, - numOwnershipChannels: Int32, - context: MetalComputeContext -) -> HybridComputeHandle? { - - // Create CoreML handle (CPU + ANE) - guard let coremlHandle = createCoreMLComputeHandle( - coremlModelPath: coremlModelPath, - serverThreadIdx: serverThreadIdx, - requireExactNNLen: requireExactNNLen, - numInputChannels: numInputChannels, - numInputGlobalChannels: numInputGlobalChannels, - numInputMetaChannels: numInputMetaChannels, - numPolicyChannels: numPolicyChannels, - numValueChannels: numValueChannels, - numScoreValueChannels: numScoreValueChannels, - numOwnershipChannels: numOwnershipChannels, - context: context - ) else { - printError("Metal backend \(serverThreadIdx): Failed to create CoreML handle") - return nil - } - - // Create MPSGraph handle (GPU) - guard let mpsGraphHandle = MPSGraphModelHandle( - modelDesc: modelDesc, - nnXLen: context.nnXLen, - nnYLen: context.nnYLen, - optimizeIdentityMask: requireExactNNLen - ) else { - printError("Metal backend \(serverThreadIdx): Failed to create MPSGraph handle") - printError("Metal backend \(serverThreadIdx): CoreML handle will be released") - return nil - } - - printError("Metal backend \(serverThreadIdx): Initialized CoreML (CPU+ANE) + MPSGraph (GPU)") - - // Log if diagnostic mode is enabled - if diagnosticLoggingEnabled { - printError("Metal backend \(serverThreadIdx): Diagnostic logging enabled (KATAGO_HYBRID_DIAG=1)") - } - - return HybridComputeHandle( - coremlHandle: coremlHandle, - mpsGraphHandle: 
mpsGraphHandle, - serverThreadIdx: serverThreadIdx - ) -} - /// Create a GPU-only compute handle using MPSGraph /// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE public func createMPSGraphOnlyHandle( From ce6eb9b544538203492442a8345d15b4004bb7fa Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 23 Feb 2026 18:43:41 +0800 Subject: [PATCH 20/34] Add gpuIdx validation, FP32+ANE warning, and fix startup message Validate gpuIdx in createComputeHandle to warn on unrecognized values and default to GPU mode. Warn when ANE mode is used with FP32 since CoreML FP32 bypasses ANE and runs on CPU only. Fix printMetalDevices to accurately describe available modes after the multiplexer refactor. Co-Authored-By: Claude Opus 4.6 --- cpp/neuralnet/metalbackend.cpp | 12 ++++++++++++ cpp/neuralnet/metalbackend.swift | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index a35bc92ce..f81bcfac5 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -495,6 +495,12 @@ static swift::Optional createCoreMLOnlyHandleI return swift::Optional::none(); } + if(context->useFP16Mode == enabled_t::False) { + cerr << "Metal backend " << serverThreadIdx << ": Warning: ANE mode with FP32 - " + << "CoreML FP32 runs on CPU only (no ANE acceleration) and is significantly slower. " + << "Consider using GPU mode (gpuIdx=0) or enabling FP16." << endl; + } + cerr << "Metal backend " << serverThreadIdx << ": Mux ANE mode - using CoreML (CPU+ANE)" << endl; return convertAndCreateCoreMLOnlyHandle(context, loadedModel, requireExactNNLen, maxBatchSize, serverThreadIdx); } @@ -571,6 +577,12 @@ ComputeHandle* NeuralNet::createComputeHandle( (void)logger; int gpuIdx = (gpuIdxForThisThread == -1) ? 
0 : gpuIdxForThisThread; + if(gpuIdx != METAL_MUX_GPU && gpuIdx != METAL_MUX_ANE) { + cerr << "Metal backend: Warning: Unrecognized gpuIdx=" << gpuIdx + << ", valid values are " << METAL_MUX_GPU << " (GPU) and " << METAL_MUX_ANE << " (ANE)" + << ". Defaulting to GPU mode." << endl; + gpuIdx = METAL_MUX_GPU; + } ComputeHandle* handle = nullptr; { diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index 64b72bea6..f5ab8e713 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -396,7 +396,7 @@ public func createCoreMLComputeHandle( /// Print available Metal compute devices public func printMetalDevices() { - printError("Metal backend: GPU-only mode (MPSGraph)") + printError("Metal backend: Available modes - GPU (MPSGraph), CPU+ANE (CoreML)") } // MARK: - MPSGraph-based Model for GPU Inference From 1f8daa81bfa17d2f9f3407b25e4a6a55e16d0ac0 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 23 Feb 2026 19:31:23 +0800 Subject: [PATCH 21/34] Fix outdated comments and add metalUseFP16 config documentation - Remove non-existent --sweep-backends benchmark reference - Update FP32+ANE warning to reference correct config keys (metalDeviceToUseThread, metalUseFP16) - Add metalUseFP16 setting to Metal sections in gtp and analysis configs - Remove outdated Swift doc comment on createMPSGraphOnlyHandle - Add clarifying comment for unused maxBatchSize parameter Co-Authored-By: Claude Opus 4.6 --- cpp/configs/analysis_example.cfg | 3 +++ cpp/configs/gtp_example.cfg | 4 +++- cpp/neuralnet/metalbackend.cpp | 4 ++-- cpp/neuralnet/metalbackend.swift | 1 - 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cpp/configs/analysis_example.cfg b/cpp/configs/analysis_example.cfg index 70d884520..a94c1537f 100644 --- a/cpp/configs/analysis_example.cfg +++ b/cpp/configs/analysis_example.cfg @@ -249,6 +249,9 @@ nnRandomize = true # # Default (no config): 1 server 
thread, GPU-only mode (gpuIdx = 0). +# FP16 precision (default true). Set to false for exact FP32 inference (slower). +# metalUseFP16 = true + # OpenCL-specific GPU settings-------------------------------------- # These only apply when using the OpenCL version of KataGo. diff --git a/cpp/configs/gtp_example.cfg b/cpp/configs/gtp_example.cfg index b3266416a..7247ed8b0 100644 --- a/cpp/configs/gtp_example.cfg +++ b/cpp/configs/gtp_example.cfg @@ -467,7 +467,6 @@ searchFactorWhenWinningThreshold = 0.95 # # Mux mode (recommended): 4 pipelined server threads (2x GPU + 2x ANE). # Set nnMaxBatchSize to half of numSearchThreads for optimal pipelining. -# Run "benchmark --sweep-backends" to find the best settings for your machine. # # Example: mux mode (best throughput) # numNNServerThreadsPerModel = 4 @@ -486,6 +485,9 @@ searchFactorWhenWinningThreshold = 0.95 # # Default (no config): 1 server thread, GPU-only mode (gpuIdx = 0). +# FP16 precision (default true). Set to false for exact FP32 inference (slower). +# metalUseFP16 = true + # ------------------------------ # OpenCL GPU settings # ------------------------------ diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index f81bcfac5..a86f3d2bd 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -498,7 +498,7 @@ static swift::Optional createCoreMLOnlyHandleI if(context->useFP16Mode == enabled_t::False) { cerr << "Metal backend " << serverThreadIdx << ": Warning: ANE mode with FP32 - " << "CoreML FP32 runs on CPU only (no ANE acceleration) and is significantly slower. " - << "Consider using GPU mode (gpuIdx=0) or enabling FP16." << endl; + << "Consider using GPU mode (metalDeviceToUseThread=0) or setting metalUseFP16=true." 
<< endl; } cerr << "Metal backend " << serverThreadIdx << ": Mux ANE mode - using CoreML (CPU+ANE)" << endl; @@ -514,7 +514,7 @@ static swift::Optional createMPSGraphHandleIfN int gpuIdx, int serverThreadIdx ) { - (void)maxBatchSize; + (void)maxBatchSize; // MPSGraph handles dynamic batches internally // Skip if this is an ANE thread - CoreML-only handle will be created instead if(gpuIdx == METAL_MUX_ANE) { diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index f5ab8e713..c9de4a3f0 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -668,7 +668,6 @@ public class MPSGraphModelHandle { } /// Create a GPU-only compute handle using MPSGraph -/// Used when useFP16=false to avoid slow FP32 CoreML execution on CPU+ANE public func createMPSGraphOnlyHandle( modelDesc: SWModelDesc, serverThreadIdx: Int, From 8cfeae5a5f10a4bd59030d2bab540ac1e6fbf7c2 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 23 Feb 2026 20:14:21 +0800 Subject: [PATCH 22/34] Include gpuIdx in ComputeHandle error message for easier diagnosis Co-Authored-By: Claude Opus 4.6 --- cpp/neuralnet/metalbackend.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index a86f3d2bd..f6ad163b7 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -545,7 +545,7 @@ coremlOnlyHandle(createCoreMLOnlyHandleIfNeeded(context, loadedModel, requireExa bool hasMPSGraph = static_cast(mpsGraphOnlyHandle); bool hasCoreML = static_cast(coremlOnlyHandle); if(hasMPSGraph + hasCoreML != 1) { - throw runtime_error("Metal backend: Logic error - expected exactly one compute handle, got " + to_string(hasMPSGraph + hasCoreML)); + throw runtime_error("Metal backend: Logic error - expected exactly one compute handle, got " + to_string(hasMPSGraph + hasCoreML) + " (gpuIdx=" + to_string(gpuIdx) + ")"); } const 
ModelDesc* modelDesc = &loadedModel->modelDesc; From 76283d99551f56a41f3bdaf2b891efa993a6627a Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Mon, 23 Feb 2026 20:27:41 +0800 Subject: [PATCH 23/34] Replace bool arithmetic with explicit XOR logic in ComputeHandle check Use `hasMPSGraph == hasCoreML` instead of `hasMPSGraph + hasCoreML != 1` to avoid implicit bool-to-int promotion. The error message now reports "both" or "neither" instead of a numeric count. Co-Authored-By: Claude Opus 4.6 --- cpp/neuralnet/metalbackend.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index f6ad163b7..e4510c505 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -544,8 +544,8 @@ mpsGraphOnlyHandle(createMPSGraphHandleIfNeeded(context, loadedModel, requireExa coremlOnlyHandle(createCoreMLOnlyHandleIfNeeded(context, loadedModel, requireExactNNLen, maxBatchSize, gpuIdx, serverThreadIdx)) { bool hasMPSGraph = static_cast(mpsGraphOnlyHandle); bool hasCoreML = static_cast(coremlOnlyHandle); - if(hasMPSGraph + hasCoreML != 1) { - throw runtime_error("Metal backend: Logic error - expected exactly one compute handle, got " + to_string(hasMPSGraph + hasCoreML) + " (gpuIdx=" + to_string(gpuIdx) + ")"); + if(hasMPSGraph == hasCoreML) { + throw runtime_error("Metal backend: Logic error - expected exactly one compute handle, got " + string(hasMPSGraph && hasCoreML ? 
"both" : "neither") + " (gpuIdx=" + to_string(gpuIdx) + ")"); } const ModelDesc* modelDesc = &loadedModel->modelDesc; From 18c46422317969d43a12950c986c2a8374a0a23c Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Thu, 26 Feb 2026 19:44:30 +0800 Subject: [PATCH 24/34] Refactor MPSGraph inference to use explicit command buffer Switch from graph.run() to graph.encode() with explicit command buffer management (commit/waitUntilCompleted). This enables GPU error checking via commandBuffer.error and lays groundwork for future pipelining. Also consolidates variable declarations and reduces verbosity in the apply() method for better readability. Co-Authored-By: Claude Opus 4.6 --- cpp/neuralnet/metalbackend.swift | 100 ++++++++++++++----------------- 1 file changed, 44 insertions(+), 56 deletions(-) diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index c9de4a3f0..e96f81ffd 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -553,7 +553,7 @@ public class MPSGraphModelHandle { printError("Metal backend: MPSGraph initialized on \(device.name)\(optimizeIdentityMask ? " (mask optimized)" : "")") } - /// Run inference on a batch using MPSGraph (GPU) + /// Run inference on a batch using MPSGraph (GPU). public func apply( input inputPointer: UnsafeMutablePointer, inputGlobal inputGlobalPointer: UnsafeMutablePointer, @@ -566,84 +566,63 @@ public class MPSGraphModelHandle { batchSize: Int ) { let channelAxis = InputShape.getChannelAxis() - let numInputChannels = input.shape[channelAxis] + let numInputChannelsNS = input.shape[channelAxis] + let numInputGlobalChannelsNS = inputGlobal.shape[channelAxis] + let numInputMetaChannelsNS = inputMeta.shape[channelAxis] let nnXLenNS = nnXLen as NSNumber let nnYLenNS = nnYLen as NSNumber + // Mask strides describe the source (input) memory layout for extracting channel 0. 
+ var maskStrideArray = [ + MemoryLayout.size, + Int(nnXLen) * MemoryLayout.size, + Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, + numInputChannels * Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, + ] + + guard let mtlCommandBuffer = commandQueue.makeCommandBuffer() else { + fatalError("Metal backend: Failed to create command buffer") + } + let commandBuffer = MPSCommandBuffer(commandBuffer: mtlCommandBuffer) + + // Spatial input let inputShape = InputShape.create( batchSize: batchSize as NSNumber, - numChannels: numInputChannels, + numChannels: numInputChannelsNS, nnYLen: nnYLenNS, nnXLen: nnXLenNS) - - let inputDescriptor = MPSNDArrayDescriptor( - dataType: input.tensor.dataType, - shape: inputShape) - - let inputArray = MPSNDArray( - device: device, - descriptor: inputDescriptor) - + let inputDescriptor = MPSNDArrayDescriptor(dataType: input.tensor.dataType, shape: inputShape) + let inputArray = MPSNDArray(device: device, descriptor: inputDescriptor) inputArray.writeBytes(inputPointer) - let numInputGlobalChannels = inputGlobal.shape[channelAxis] - + // Global input let inputGlobalShape = InputShape.create( batchSize: batchSize as NSNumber, - numChannels: numInputGlobalChannels, + numChannels: numInputGlobalChannelsNS, nnYLen: 1, nnXLen: 1) - - let inputGlobalDescriptor = MPSNDArrayDescriptor( - dataType: inputGlobal.tensor.dataType, - shape: inputGlobalShape) - - let inputGlobalArray = MPSNDArray( - device: device, - descriptor: inputGlobalDescriptor) - + let inputGlobalDescriptor = MPSNDArrayDescriptor(dataType: inputGlobal.tensor.dataType, shape: inputGlobalShape) + let inputGlobalArray = MPSNDArray(device: device, descriptor: inputGlobalDescriptor) inputGlobalArray.writeBytes(inputGlobalPointer) - let numInputMetaChannels = inputMeta.shape[channelAxis] - + // Meta input let inputMetaShape = InputShape.create( batchSize: batchSize as NSNumber, - numChannels: numInputMetaChannels, + numChannels: numInputMetaChannelsNS, nnYLen: 1, nnXLen: 1) - - let 
inputMetaDescriptor = MPSNDArrayDescriptor( - dataType: inputMeta.tensor.dataType, - shape: inputMetaShape) - - let inputMetaArray = MPSNDArray( - device: device, - descriptor: inputMetaDescriptor) - + let inputMetaDescriptor = MPSNDArrayDescriptor(dataType: inputMeta.tensor.dataType, shape: inputMetaShape) + let inputMetaArray = MPSNDArray(device: device, descriptor: inputMetaDescriptor) inputMetaArray.writeBytes(inputMetaPointer) + // Mask (extracted from first channel of spatial input) let maskShape = InputShape.create( batchSize: batchSize as NSNumber, numChannels: 1, nnYLen: nnYLenNS, nnXLen: nnXLenNS) - - let maskDescriptor = MPSNDArrayDescriptor( - dataType: mask.tensor.dataType, - shape: maskShape) - - let maskArray = MPSNDArray( - device: device, - descriptor: maskDescriptor) - - // Extract mask from first channel of spatial input - var maskStrideArray = [ - MemoryLayout.size, - Int(nnXLen) * MemoryLayout.size, - Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, - numInputChannels.intValue * Int(nnYLen) * Int(nnXLen) * MemoryLayout.size, - ] - + let maskDescriptor = MPSNDArrayDescriptor(dataType: mask.tensor.dataType, shape: maskShape) + let maskArray = MPSNDArray(device: device, descriptor: maskDescriptor) maskArray.writeBytes(inputPointer, strideBytes: &maskStrideArray) let feeds = [ @@ -653,12 +632,21 @@ public class MPSGraphModelHandle { mask.tensor: MPSGraphTensorData(maskArray), ] - let fetch = graph.run( - with: commandQueue, + let fetch = graph.encode( + to: commandBuffer, feeds: feeds, targetTensors: targetTensors, - targetOperations: nil) + targetOperations: nil, + executionDescriptor: nil) + + commandBuffer.commit() + commandBuffer.waitUntilCompleted() + + if let error = commandBuffer.error { + fatalError("Metal backend: GPU error: \(error)") + } + // Copy results into output buffers fetch[policyHead.policyTensor]?.mpsndarray().readBytes(policy) fetch[policyHead.policyPassTensor]?.mpsndarray().readBytes(policyPass) 
fetch[valueHead.valueTensor]?.mpsndarray().readBytes(value) From 60760f15cf900ade9b93487da2f890c53ce98892 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Fri, 27 Feb 2026 08:42:51 +0800 Subject: [PATCH 25/34] Remove dead USE_COREML_BACKEND ifdef and fix gitignore comment The COREML backend was unified into METAL, but two leftover references remained: a dead #ifdef block in benchmark.cpp and an outdated comment in .gitignore. Co-Authored-By: Claude Opus 4.6 --- .gitignore | 2 +- cpp/command/benchmark.cpp | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index b509ec1ff..32b9d677b 100644 --- a/.gitignore +++ b/.gitignore @@ -91,6 +91,6 @@ cpp/build.ninja cpp/KataGoSwift.* cpp/include/KataGoSwift/KataGoSwift-swift.h -# For CoreML Backend +# For Metal Backend cpp/KataGoCoreML.* cpp/include/KataGoCoreML/KataGoCoreML-swift.h diff --git a/cpp/command/benchmark.cpp b/cpp/command/benchmark.cpp index b29b9325b..3100fb1b1 100644 --- a/cpp/command/benchmark.cpp +++ b/cpp/command/benchmark.cpp @@ -260,9 +260,6 @@ int MainCmds::benchmark(const vector& args) { #ifdef USE_METAL_BACKEND cout << "You are currently using the Metal version of KataGo." << endl; #endif -#ifdef USE_COREML_BACKEND - cout << "You are currently using the Core ML version of KataGo." << endl; -#endif #ifdef USE_OPENCL_BACKEND cout << "You are currently using the OpenCL version of KataGo." << endl; cout << "If you have a strong GPU capable of FP16 tensor cores (e.g. 
RTX2080), " From 308ad194c69675f3fc6c72a4004350ab7f05658e Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Fri, 27 Feb 2026 08:43:54 +0800 Subject: [PATCH 26/34] Adjusted the parameter alignment in the getMetalOutput function --- cpp/neuralnet/metalbackend.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cpp/neuralnet/metalbackend.h b/cpp/neuralnet/metalbackend.h index e10b8547a..0bf26f41b 100644 --- a/cpp/neuralnet/metalbackend.h +++ b/cpp/neuralnet/metalbackend.h @@ -53,10 +53,10 @@ void processRow(size_t row, vector& outputs); void getMetalOutput(ComputeHandle* gpuHandle, - InputBuffers* inputBuffers, - int numBatchEltsFilled, - NNResultBuf** inputBufs, - vector& outputs); + InputBuffers* inputBuffers, + int numBatchEltsFilled, + NNResultBuf** inputBufs, + vector& outputs); } /** From ea9c81fcaa41d51ba57a3a8c1407abded1fda0fe Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Fri, 27 Feb 2026 08:50:22 +0800 Subject: [PATCH 27/34] Remove COREML backend condition Updated the CMakeLists.txt to eliminate the COREML backend condition, retaining only the METAL backend check. Additionally, removed references to COREML backend files from .gitignore to reflect the recent unification of backends. 
--- .gitignore | 4 ---- cpp/CMakeLists.txt | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 32b9d677b..2e933d553 100644 --- a/.gitignore +++ b/.gitignore @@ -90,7 +90,3 @@ cpp/.ninja_log cpp/build.ninja cpp/KataGoSwift.* cpp/include/KataGoSwift/KataGoSwift-swift.h - -# For Metal Backend -cpp/KataGoCoreML.* -cpp/include/KataGoCoreML/KataGoCoreML-swift.h diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 4578732ae..860e26476 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.18.2) -if(USE_BACKEND STREQUAL "METAL" OR USE_BACKEND STREQUAL "COREML") +if(USE_BACKEND STREQUAL "METAL") project(katago LANGUAGES CXX Swift) else() project(katago) From e59bdf842980b433cd6de3ad1337c8b7ad0ed778 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sun, 15 Mar 2026 20:26:25 +0800 Subject: [PATCH 28/34] Hard-fail on CoreML inference error in apply() Replace printError (which left output buffers potentially uninitialized) with fatalError, which terminates immediately with a clear message. C++ cannot catch Swift errors, so fatalError is the correct idiom for an unrecoverable error at a C++/Swift call boundary. 
--- cpp/neuralnet/metalbackend.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/neuralnet/metalbackend.swift b/cpp/neuralnet/metalbackend.swift index e96f81ffd..43e17fa56 100644 --- a/cpp/neuralnet/metalbackend.swift +++ b/cpp/neuralnet/metalbackend.swift @@ -122,7 +122,7 @@ public class CoreMLComputeHandle { ownership: ownership ) } catch { - printError("Metal backend: CoreML inference error: \(error)") + fatalError("Metal backend: CoreML inference error: \(error)") } } } From 8a352edc2ba6faf66c23ef255e13945438c3a384 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sun, 15 Mar 2026 20:42:22 +0800 Subject: [PATCH 29/34] Fail loudly on unhandled activation kind in Metal backend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the silent default case in activationLayerDescToSwift that returned ActivationKind::identity() — masking unhandled activation types and producing silently wrong results — with a StringError throw. 
Co-Authored-By: Claude Sonnet 4.6 --- cpp/neuralnet/metalbackend.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/neuralnet/metalbackend.cpp b/cpp/neuralnet/metalbackend.cpp index e4510c505..ac061429c 100644 --- a/cpp/neuralnet/metalbackend.cpp +++ b/cpp/neuralnet/metalbackend.cpp @@ -129,7 +129,7 @@ ActivationKind activationLayerDescToSwift(const ActivationLayerDesc* desc) { case ACTIVATION_IDENTITY: return ActivationKind::identity(); default: - return ActivationKind::identity(); + throw StringError("Unhandled activation kind: " + std::to_string(desc->activation)); } } From b8bd04b6c08955b022e58d595ea4654f56f9f2fa Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sat, 21 Mar 2026 22:42:37 +0800 Subject: [PATCH 30/34] Remove obsolete assertions in MatMulLayer --- cpp/neuralnet/metallayers.swift | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cpp/neuralnet/metallayers.swift b/cpp/neuralnet/metallayers.swift index 9e3327c32..da5441fed 100644 --- a/cpp/neuralnet/metallayers.swift +++ b/cpp/neuralnet/metallayers.swift @@ -620,11 +620,6 @@ struct MatMulLayer { descriptor: SWMatMulLayerDesc, sourceTensor: MPSGraphTensor ) { - assert( - (sourceTensor.shape?.count == 4) || (sourceTensor.shape?[1] == descriptor.inChannels)) - assert( - (sourceTensor.shape?.count == 2) || (sourceTensor.shape?[1] == descriptor.inChannels)) - let weightsShape = [ descriptor.inChannels, descriptor.outChannels, From 6c3df7afd47cb3af06f42a1fd5841ec61936b3ac Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sat, 21 Mar 2026 22:47:20 +0800 Subject: [PATCH 31/34] Add assertion for dilation parameters in ConvLayer Introduce an assertion to ensure that dilationX and dilationY are set to 1 in the ConvLayer class, enhancing error detection during layer initialization. 
--- cpp/neuralnet/metallayers.swift | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cpp/neuralnet/metallayers.swift b/cpp/neuralnet/metallayers.swift index da5441fed..811fe0915 100644 --- a/cpp/neuralnet/metallayers.swift +++ b/cpp/neuralnet/metallayers.swift @@ -502,6 +502,8 @@ class ConvLayer { nnXLen: NSNumber, nnYLen: NSNumber ) { + assert(descriptor.dilationX == 1 && descriptor.dilationY == 1) + let weightsShape = [ descriptor.outChannels, descriptor.inChannels, From 2b8aa73faf327b4389c3d51c580d50a413047481 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sun, 22 Mar 2026 16:09:41 +0800 Subject: [PATCH 32/34] Vendor katagocoreml-cpp library to make Metal backend self-contained Copy the katagocoreml-cpp source code into cpp/external/katagocoreml/ so that the Metal backend no longer requires an external Homebrew package. The library converts KataGo .bin.gz models to CoreML .mlpackage format at runtime. Replace `brew install katagocoreml` with `brew install protobuf abseil` as the new build-time dependencies. Use pkg-config (not CMake targets) for abseil/protobuf link flags to avoid swiftc-incompatible "-Wl,-framework" flags from abseil's CMake config. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- .github/workflows/build.yml | 4 +- .gitignore | 1 + Compiling.md | 6 +- cpp/CMakeLists.txt | 9 +- cpp/external/katagocoreml/CMakeLists.txt | 134 + cpp/external/katagocoreml/LICENSE | 29 + cpp/external/katagocoreml/NOTICE | 106 + .../include/katagocoreml/KataGoConverter.hpp | 51 + .../include/katagocoreml/Options.hpp | 117 + .../include/katagocoreml/Version.hpp | 8 + cpp/external/katagocoreml/src/Converter.cpp | 106 + .../katagocoreml/src/builder/MILBuilder.cpp | 2126 ++ .../katagocoreml/src/builder/MILBuilder.hpp | 194 + .../katagocoreml/src/builder/Operations.cpp | 31 + .../katagocoreml/src/builder/Operations.hpp | 77 + .../katagocoreml/src/parser/KataGoParser.cpp | 573 + .../katagocoreml/src/parser/KataGoParser.hpp | 73 + .../src/serializer/CoreMLSerializer.cpp | 289 + .../src/serializer/CoreMLSerializer.hpp | 54 + .../src/serializer/WeightSerializer.cpp | 38 + .../src/serializer/WeightSerializer.hpp | 25 + .../katagocoreml/src/types/KataGoTypes.hpp | 297 + .../katagocoreml/vendor/deps/FP16/LICENSE | 11 + .../katagocoreml/vendor/deps/FP16/README.md | 20 + .../vendor/deps/FP16/include/fp16.h | 11 + .../vendor/deps/FP16/include/fp16/bitcasts.h | 92 + .../vendor/deps/FP16/include/fp16/fp16.h | 451 + .../vendor/deps/FP16/include/fp16/psimd.h | 131 + .../vendor/deps/nlohmann/CODE_OF_CONDUCT.md | 46 + .../vendor/deps/nlohmann/LICENSE.MIT | 21 + .../vendor/deps/nlohmann/README.md | 1643 + .../vendor/deps/nlohmann/json.hpp | 25855 ++++++++++++++++ .../katagocoreml/vendor/mlmodel/LICENSE.txt | 11 + .../format/ArrayFeatureExtractor.proto | 19 + .../mlmodel/format/AudioFeaturePrint.proto | 36 + .../format/BayesianProbitRegressor.proto | 139 + .../mlmodel/format/CategoricalMapping.proto | 38 + .../format/ClassConfidenceThresholding.proto | 41 + .../vendor/mlmodel/format/CustomModel.proto | 30 + .../mlmodel/format/DataStructures.proto | 126 + .../mlmodel/format/DictVectorizer.proto | 36 + 
.../vendor/mlmodel/format/FeatureTypes.proto | 233 + .../mlmodel/format/FeatureVectorizer.proto | 26 + .../vendor/mlmodel/format/GLMClassifier.proto | 43 + .../vendor/mlmodel/format/GLMRegressor.proto | 28 + .../vendor/mlmodel/format/Gazetteer.proto | 43 + .../vendor/mlmodel/format/Identity.proto | 18 + .../vendor/mlmodel/format/Imputer.proto | 43 + .../format/ItemSimilarityRecommender.proto | 74 + .../vendor/mlmodel/format/LICENSE.txt | 11 + .../vendor/mlmodel/format/LinkedModel.proto | 40 + .../vendor/mlmodel/format/MIL.proto | 371 + .../vendor/mlmodel/format/Model.proto | 415 + .../mlmodel/format/NearestNeighbors.proto | 132 + .../vendor/mlmodel/format/NeuralNetwork.proto | 6531 ++++ .../format/NonMaximumSuppression.proto | 187 + .../vendor/mlmodel/format/Normalizer.proto | 38 + .../vendor/mlmodel/format/OneHotEncoder.proto | 41 + .../vendor/mlmodel/format/Parameters.proto | 52 + .../vendor/mlmodel/format/SVM.proto | 195 + .../vendor/mlmodel/format/Scaler.proto | 34 + .../format/SoundAnalysisPreprocessing.proto | 60 + .../mlmodel/format/TextClassifier.proto | 43 + .../vendor/mlmodel/format/TreeEnsemble.proto | 161 + .../mlmodel/format/VisionFeaturePrint.proto | 67 + .../vendor/mlmodel/format/WordEmbedding.proto | 35 + .../vendor/mlmodel/format/WordTagger.proto | 75 + .../vendor/mlmodel/src/MILBlob/Bf16.hpp | 57 + .../mlmodel/src/MILBlob/Blob/BlobDataType.hpp | 131 + .../mlmodel/src/MILBlob/Blob/FileWriter.cpp | 94 + .../mlmodel/src/MILBlob/Blob/FileWriter.hpp | 63 + .../src/MILBlob/Blob/MMapFileReader.cpp | 62 + .../src/MILBlob/Blob/MMapFileReader.hpp | 67 + .../MILBlob/Blob/MMapFileReaderFactory.cpp | 16 + .../MILBlob/Blob/MMapFileReaderFactory.hpp | 19 + .../src/MILBlob/Blob/StorageFormat.hpp | 92 + .../src/MILBlob/Blob/StorageReader.cpp | 309 + .../src/MILBlob/Blob/StorageReader.hpp | 137 + .../src/MILBlob/Blob/StorageWriter.cpp | 234 + .../src/MILBlob/Blob/StorageWriter.hpp | 88 + .../vendor/mlmodel/src/MILBlob/Fp16.cpp | 31 + 
.../vendor/mlmodel/src/MILBlob/Fp16.hpp | 53 + .../vendor/mlmodel/src/MILBlob/Fp8.cpp | 189 + .../vendor/mlmodel/src/MILBlob/Fp8.hpp | 107 + .../mlmodel/src/MILBlob/SubByteTypeList.hpp | 13 + .../mlmodel/src/MILBlob/SubByteTypes.cpp | 209 + .../mlmodel/src/MILBlob/SubByteTypes.hpp | 134 + .../vendor/mlmodel/src/MILBlob/Util/Span.hpp | 674 + .../mlmodel/src/MILBlob/Util/SpanCast.hpp | 65 + .../MILBlob/Util/SubByteConversionUtils.hpp | 41 + .../mlmodel/src/MILBlob/Util/Verify.hpp | 31 + .../vendor/modelpackage/LICENSE.txt | 11 + .../vendor/modelpackage/src/ModelPackage.cpp | 603 + .../vendor/modelpackage/src/ModelPackage.hpp | 160 + .../vendor/modelpackage/src/utils/JsonMap.cpp | 171 + .../vendor/modelpackage/src/utils/JsonMap.hpp | 52 + 96 files changed, 46000 insertions(+), 14 deletions(-) create mode 100644 cpp/external/katagocoreml/CMakeLists.txt create mode 100644 cpp/external/katagocoreml/LICENSE create mode 100644 cpp/external/katagocoreml/NOTICE create mode 100644 cpp/external/katagocoreml/include/katagocoreml/KataGoConverter.hpp create mode 100644 cpp/external/katagocoreml/include/katagocoreml/Options.hpp create mode 100644 cpp/external/katagocoreml/include/katagocoreml/Version.hpp create mode 100644 cpp/external/katagocoreml/src/Converter.cpp create mode 100644 cpp/external/katagocoreml/src/builder/MILBuilder.cpp create mode 100644 cpp/external/katagocoreml/src/builder/MILBuilder.hpp create mode 100644 cpp/external/katagocoreml/src/builder/Operations.cpp create mode 100644 cpp/external/katagocoreml/src/builder/Operations.hpp create mode 100644 cpp/external/katagocoreml/src/parser/KataGoParser.cpp create mode 100644 cpp/external/katagocoreml/src/parser/KataGoParser.hpp create mode 100644 cpp/external/katagocoreml/src/serializer/CoreMLSerializer.cpp create mode 100644 cpp/external/katagocoreml/src/serializer/CoreMLSerializer.hpp create mode 100644 cpp/external/katagocoreml/src/serializer/WeightSerializer.cpp create mode 100644 
cpp/external/katagocoreml/src/serializer/WeightSerializer.hpp create mode 100644 cpp/external/katagocoreml/src/types/KataGoTypes.hpp create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/LICENSE create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/README.md create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/include/fp16.h create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/bitcasts.h create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/fp16.h create mode 100644 cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/psimd.h create mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md create mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT create mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/README.md create mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/LICENSE.txt create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/ArrayFeatureExtractor.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/AudioFeaturePrint.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/BayesianProbitRegressor.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/CategoricalMapping.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/ClassConfidenceThresholding.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/CustomModel.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/DataStructures.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/DictVectorizer.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/FeatureTypes.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/FeatureVectorizer.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/GLMClassifier.proto create 
mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/GLMRegressor.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Gazetteer.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Identity.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Imputer.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/ItemSimilarityRecommender.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/LICENSE.txt create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/LinkedModel.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/MIL.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Model.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/NearestNeighbors.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/NeuralNetwork.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/NonMaximumSuppression.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Normalizer.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/OneHotEncoder.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Parameters.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/SVM.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/Scaler.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/SoundAnalysisPreprocessing.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/TextClassifier.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/TreeEnsemble.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/VisionFeaturePrint.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/WordEmbedding.proto create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/format/WordTagger.proto create mode 100644 
cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Bf16.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/BlobDataType.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageFormat.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypeList.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.cpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Span.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SpanCast.hpp create mode 100644 
cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SubByteConversionUtils.hpp create mode 100644 cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Verify.hpp create mode 100644 cpp/external/katagocoreml/vendor/modelpackage/LICENSE.txt create mode 100644 cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.cpp create mode 100644 cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.hpp create mode 100644 cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.cpp create mode 100644 cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.hpp diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 97184fc43..f3e44fd1e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -108,9 +108,7 @@ jobs: - name: Install dependencies run: | - brew install ninja zlib libzip - brew tap chinchangyang/katagocoreml-cpp - brew install katagocoreml + brew install ninja zlib libzip protobuf abseil - name: Cache CMake build uses: actions/cache@v4 diff --git a/.gitignore b/.gitignore index 2e933d553..5e8ac9f95 100644 --- a/.gitignore +++ b/.gitignore @@ -90,3 +90,4 @@ cpp/.ninja_log cpp/build.ninja cpp/KataGoSwift.* cpp/include/KataGoSwift/KataGoSwift-swift.h +cpp/external/katagocoreml/proto/ diff --git a/Compiling.md b/Compiling.md index acc1b3cbd..a03302281 100644 --- a/Compiling.md +++ b/Compiling.md @@ -120,10 +120,6 @@ As also mentioned in the instructions below but repeated here for visibility, if ## MacOS * TLDR (Metal backend - recommended for most users, hybrid CPU+GPU+Neural Engine for maximum throughput): ``` - # First, install the katagocoreml library via Homebrew - brew tap chinchangyang/katagocoreml-cpp - brew install katagocoreml - git clone https://github.com/lightvector/KataGo.git cd KataGo/cpp # If you get missing library errors, install the appropriate packages using your system package manager and try again. 
@@ -136,7 +132,7 @@ As also mentioned in the instructions below but repeated here for visibility, if * CMake with a minimum version of 3.18.2: `brew install cmake`. * AppleClang and Swift compilers: `xcode-select --install`. * If using the Metal backend, [Ninja](https://ninja-build.org): `brew install ninja` - * If using the Metal backend, katagocoreml library: `brew tap chinchangyang/katagocoreml-cpp && brew install katagocoreml` + * If using the Metal backend, protobuf and abseil: `brew install protobuf abseil` * libzip: `brew install libzip`. * If you want to do self-play training and research, probably Google perftools `brew install gperftools` for TCMalloc or some other better malloc implementation. For unknown reasons, the allocation pattern in self-play with large numbers of threads and parallel games causes a lot of memory fragmentation under glibc malloc that will eventually run your machine out of memory, but better mallocs handle it fine. * If compiling to contribute to public distributed training runs, OpenSSL is required (`brew install openssl`). diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 860e26476..c11628c33 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -107,8 +107,7 @@ elseif(USE_BACKEND STREQUAL "METAL") if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") message(FATAL_ERROR "Project requires building with AppleClang. 
Have ${CMAKE_CXX_COMPILER_ID}") endif() - find_package(PkgConfig REQUIRED) - pkg_check_modules(KATAGOCOREML REQUIRED katagocoreml) + add_subdirectory(external/katagocoreml) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/external/macos/cmake/modules") include(InitializeSwift) include(AddSwift) @@ -401,10 +400,8 @@ elseif(USE_BACKEND STREQUAL "TENSORRT") target_link_libraries(katago CUDA::cudart_static ${TENSORRT_LIBRARY}) elseif(USE_BACKEND STREQUAL "METAL") target_compile_definitions(katago PRIVATE USE_METAL_BACKEND) - target_include_directories(katago PRIVATE ${KATAGOCOREML_INCLUDE_DIRS}) - find_library(KATAGOCOREML_LIB katagocoreml HINTS /usr/local/lib REQUIRED) - target_link_directories(katago PRIVATE ${KATAGOCOREML_LIBRARY_DIRS}) - target_link_libraries(katago KataGoSwift ${KATAGOCOREML_LIB} ${KATAGOCOREML_LDFLAGS} + target_link_libraries(katago KataGoSwift katagocoreml + ${KATAGOCOREML_DEP_LDFLAGS} "-framework MetalPerformanceShaders" "-framework MetalPerformanceShadersGraph") if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") diff --git a/cpp/external/katagocoreml/CMakeLists.txt b/cpp/external/katagocoreml/CMakeLists.txt new file mode 100644 index 000000000..e0ef5a0c2 --- /dev/null +++ b/cpp/external/katagocoreml/CMakeLists.txt @@ -0,0 +1,134 @@ +# katagocoreml - KataGo to Core ML Converter (vendored) +# Simplified build for use as a subdirectory of KataGo. +# +# Note: We deliberately avoid linking against CMake's abseil/protobuf targets +# (find_package(absl)) because their INTERFACE_LINK_LIBRARIES propagate +# "-Wl,-framework,CoreFoundation" to the final executable, which swiftc +# (used as the linker for the katago Swift/C++ hybrid) does not understand. +# Instead, we use pkg-config for include/link flags, which produces +# swiftc-compatible flags like "-framework CoreFoundation". 
+ +# ============================================================================ +# External Dependencies +# ============================================================================ + +find_package(ZLIB REQUIRED) +find_package(Protobuf REQUIRED) # Needed for protoc executable and include dirs +find_package(PkgConfig REQUIRED) +pkg_check_modules(KATAGOCOREML_ABSEIL REQUIRED + absl_base absl_log absl_log_internal_check_op absl_log_internal_message + absl_hash absl_strings absl_status absl_statusor +) + +# Export link flags to parent scope for the final executable +pkg_check_modules(KATAGOCOREML_ALL_DEPS REQUIRED + protobuf + absl_base absl_log absl_log_internal_check_op absl_log_internal_message + absl_hash absl_strings absl_status absl_statusor +) +set(KATAGOCOREML_DEP_LDFLAGS ${KATAGOCOREML_ALL_DEPS_LDFLAGS} PARENT_SCOPE) + +# ============================================================================ +# Proto Files (compile from source) +# ============================================================================ + +set(COREMLTOOLS_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/vendor") +set(PROTO_DIR "${COREMLTOOLS_ROOT}/mlmodel/format") +set(PROTO_GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/proto") +file(MAKE_DIRECTORY ${PROTO_GENERATED_DIR}) + +# Get all proto files +file(GLOB PROTO_FILES "${PROTO_DIR}/*.proto") + +# Generate C++ from all proto files +set(PROTO_SRCS) +set(PROTO_HDRS) + +foreach(PROTO_FILE ${PROTO_FILES}) + get_filename_component(PROTO_NAME ${PROTO_FILE} NAME_WE) + set(PROTO_SRC "${PROTO_GENERATED_DIR}/${PROTO_NAME}.pb.cc") + set(PROTO_HDR "${PROTO_GENERATED_DIR}/${PROTO_NAME}.pb.h") + list(APPEND PROTO_SRCS ${PROTO_SRC}) + list(APPEND PROTO_HDRS ${PROTO_HDR}) + + add_custom_command( + OUTPUT ${PROTO_SRC} ${PROTO_HDR} + COMMAND ${Protobuf_PROTOC_EXECUTABLE} + ARGS --cpp_out=${PROTO_GENERATED_DIR} + -I${PROTO_DIR} + ${PROTO_FILE} + DEPENDS ${PROTO_FILE} + COMMENT "Generating C++ from ${PROTO_NAME}.proto" + VERBATIM + ) +endforeach() + +# 
============================================================================ +# MILBlob Sources (vendored from coremltools) +# ============================================================================ + +set(MILBLOB_DIR "${COREMLTOOLS_ROOT}/mlmodel/src/MILBlob") +set(MILBLOB_SRCS + "${MILBLOB_DIR}/Blob/FileWriter.cpp" + "${MILBLOB_DIR}/Blob/StorageWriter.cpp" + "${MILBLOB_DIR}/Blob/StorageReader.cpp" + "${MILBLOB_DIR}/Blob/MMapFileReader.cpp" + "${MILBLOB_DIR}/Blob/MMapFileReaderFactory.cpp" + "${MILBLOB_DIR}/SubByteTypes.cpp" + "${MILBLOB_DIR}/Fp8.cpp" + "${MILBLOB_DIR}/Fp16.cpp" +) + +# ============================================================================ +# ModelPackage Sources (vendored from coremltools) +# ============================================================================ + +set(MODELPACKAGE_DIR "${COREMLTOOLS_ROOT}/modelpackage/src") +set(MODELPACKAGE_SRCS + "${MODELPACKAGE_DIR}/ModelPackage.cpp" + "${MODELPACKAGE_DIR}/utils/JsonMap.cpp" +) + +# ============================================================================ +# KataGoCoreML Library Sources +# ============================================================================ + +set(KATAGOCOREML_SRCS + src/parser/KataGoParser.cpp + src/builder/MILBuilder.cpp + src/builder/Operations.cpp + src/serializer/CoreMLSerializer.cpp + src/serializer/WeightSerializer.cpp + src/Converter.cpp +) + +# ============================================================================ +# Library Target +# ============================================================================ + +add_library(katagocoreml STATIC + ${KATAGOCOREML_SRCS} + ${PROTO_SRCS} + ${MILBLOB_SRCS} + ${MODELPACKAGE_SRCS} +) + +target_include_directories(katagocoreml + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src + ${PROTO_GENERATED_DIR} + ${MILBLOB_DIR}/.. 
+ ${MODELPACKAGE_DIR} + ${COREMLTOOLS_ROOT}/deps/nlohmann + ${COREMLTOOLS_ROOT}/deps/FP16/include + ${Protobuf_INCLUDE_DIRS} + ${KATAGOCOREML_ABSEIL_INCLUDE_DIRS} +) + +# Only link ZLIB as a CMake target (no swiftc-incompatible flags). +# Protobuf and abseil are linked via pkg-config LDFLAGS in the parent. +target_link_libraries(katagocoreml PRIVATE ZLIB::ZLIB) + +target_compile_definitions(katagocoreml PRIVATE APPLE_BUILD=1) diff --git a/cpp/external/katagocoreml/LICENSE b/cpp/external/katagocoreml/LICENSE new file mode 100644 index 000000000..747217958 --- /dev/null +++ b/cpp/external/katagocoreml/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2025, Chin-Chang Yang +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cpp/external/katagocoreml/NOTICE b/cpp/external/katagocoreml/NOTICE new file mode 100644 index 000000000..4fcb99679 --- /dev/null +++ b/cpp/external/katagocoreml/NOTICE @@ -0,0 +1,106 @@ +katagocoreml-cpp +Copyright (c) 2025, Chin-Chang Yang + +This project includes third-party software components with their own licenses: + +================================================================================ +Core ML Proto Definitions, MILBlob, and ModelPackage +From: Apple coremltools (https://github.com/apple/coremltools) +License: BSD-3-Clause +================================================================================ + +Copyright (c) 2017-2022, Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder(s) nor the names of any contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================================ +KataGo Neural Network Models (tests/models/) +================================================================================ + +**g170 series models (g170-b6c96, g170e-b10c128):** +From: KataGo project (https://github.com/lightvector/KataGo) +License: CC0 (Public Domain) +These are from the oldest KataGo training runs and are released into the public +domain. No restrictions on use. + +**b5c192nbt-distilled model:** +Copyright (c) 2025, Chin-Chang Yang +License: BSD-3-Clause (same as this project) +This is a custom-trained model included for testing purposes. 
+ +================================================================================ +nlohmann/json +From: https://github.com/nlohmann/json +License: MIT +================================================================================ + +Copyright (c) 2013-2021 Niels Lohmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================================ +FP16 +From: https://github.com/Maratyszcza/FP16 +License: MIT +================================================================================ + +Copyright (c) 2017 Facebook Inc. 
+Copyright (c) 2017 Georgia Institute of Technology +Copyright 2019 Google LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/cpp/external/katagocoreml/include/katagocoreml/KataGoConverter.hpp b/cpp/external/katagocoreml/include/katagocoreml/KataGoConverter.hpp new file mode 100644 index 000000000..ca89f3323 --- /dev/null +++ b/cpp/external/katagocoreml/include/katagocoreml/KataGoConverter.hpp @@ -0,0 +1,51 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include "katagocoreml/Options.hpp" +#include <string> + +namespace katagocoreml { + +/// Main converter class for KataGo to Core ML conversion +class KataGoConverter { +public: + /// Supported KataGo model versions + static constexpr int MIN_SUPPORTED_VERSION = 8; + static constexpr int MAX_SUPPORTED_VERSION = 16; + + /// Convert KataGo model file to Core ML mlpackage + /// + /// @param input_path Path to .bin or .bin.gz KataGo model file + /// @param output_path Path for output .mlpackage directory + /// @param options Conversion options + /// @throws std::runtime_error on conversion failure + static void convert( + const std::string& input_path, + const std::string& output_path, + const ConversionOptions& options = ConversionOptions{} + ); + + /// Get model information without full conversion + /// + /// @param input_path Path to .bin or .bin.gz KataGo model file + /// @return ModelInfo structure with model metadata + /// @throws std::runtime_error if file cannot be parsed + static ModelInfo getModelInfo(const std::string& input_path); + + /// Check if a model version is supported + /// + /// @param version KataGo model version number + /// @return true if version is supported + static bool isVersionSupported(int version) { + return version >= MIN_SUPPORTED_VERSION && version <= MAX_SUPPORTED_VERSION; + } + + /// Get library version string + static std::string getVersion() { + return "1.1.0"; + } +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/include/katagocoreml/Options.hpp
b/cpp/external/katagocoreml/include/katagocoreml/Options.hpp new file mode 100644 index 000000000..11cfdb5e2 --- /dev/null +++ b/cpp/external/katagocoreml/include/katagocoreml/Options.hpp @@ -0,0 +1,117 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include <string> + +namespace katagocoreml { + +/// Conversion options for KataGo to Core ML conversion +struct ConversionOptions { + /// Board width (default: 19) + int board_x_size = 19; + + /// Board height (default: 19) + int board_y_size = 19; + + /// Optimize for full board (skip mask operations) + /// Provides ~6.5% inference speedup but requires all positions valid + bool optimize_identity_mask = false; + + /// Compute precision: "FLOAT32" or "FLOAT16" + std::string compute_precision = "FLOAT32"; + + /// Use FLOAT16 for model inputs/outputs (instead of FLOAT32) + /// Only effective when compute_precision="FLOAT16" + /// When true with compute_precision="FLOAT16", creates pure FP16 model + /// When false (default), uses FP32 I/O with FP16 internal computation + /// Has no effect when compute_precision="FLOAT32" + bool use_fp16_io = false; + + /// Core ML specification version (default: 6 for iOS 15+) + int specification_version = 6; + + /// KataGo model version (set internally during conversion) + int model_version = 0; + + /// Metadata encoder version (0 = no encoder, >0 = has encoder) + int meta_encoder_version = 0; + + /// Number of metadata input channels (192 for human SL networks) + int num_input_meta_channels = 0; + + /// Number of spatial input channels (set internally, typically 22) + int num_input_channels = 0; + + /// Number of global input channels (set internally, typically 19) + int num_input_global_channels = 0; + + /// Minimum batch size for inference (must be >= 1) + /// Default: 1 (single sample inference) + int min_batch_size = 1; + + /// Maximum batch size for inference + /// If equal to min_batch_size, uses fixed batch size
+ /// If greater than min_batch_size, enables dynamic batch support + /// If <= 0, allows unlimited batch size (unbounded) + /// Default: 1 (fixed single batch, backward compatible) + int max_batch_size = 1; + + /// Author name (who ran the converter) - optional, set via CLI + std::string author; + + /// License for the model - optional, set via CLI + /// Typical values: "MIT", "CC0", "BSD-3-Clause" + std::string license; + + /// Source KataGo model filename (set internally) + std::string source_filename; + + /// Number of residual blocks (set internally) + int num_blocks = 0; + + /// Trunk channel width (set internally) + int trunk_channels = 0; + + /// Model name from KataGo binary (set internally) + std::string model_name; + + /// Check if dynamic batch is enabled + /// Dynamic batch allows variable batch sizes at runtime + bool isDynamicBatch() const { + return min_batch_size != max_batch_size || max_batch_size <= 0; + } +}; + +/// Information about a KataGo model (without full conversion) +struct ModelInfo { + /// Model name from file header + std::string name; + + /// KataGo model version (8-16) + int version = 0; + + /// Number of spatial input channels (typically 22) + int num_input_channels = 0; + + /// Number of global input channels (typically 19) + int num_input_global_channels = 0; + + /// Number of residual blocks + int num_blocks = 0; + + /// Trunk channel width + int trunk_channels = 0; + + /// Whether model has SGF metadata encoder (human SL networks) + bool has_metadata_encoder = false; + + /// Number of policy output channels (1, 2, or 4 depending on version) + int num_policy_channels = 0; + + /// Number of score value channels (4 or 6 depending on version) + int num_score_value_channels = 0; +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/include/katagocoreml/Version.hpp b/cpp/external/katagocoreml/include/katagocoreml/Version.hpp new file mode 100644 index 000000000..9cb663283 --- /dev/null +++ 
b/cpp/external/katagocoreml/include/katagocoreml/Version.hpp @@ -0,0 +1,8 @@ +#pragma once + +namespace katagocoreml { +constexpr const char* VERSION = "1.1.0"; +constexpr int VERSION_MAJOR = 1; +constexpr int VERSION_MINOR = 1; +constexpr int VERSION_PATCH = 0; +} diff --git a/cpp/external/katagocoreml/src/Converter.cpp b/cpp/external/katagocoreml/src/Converter.cpp new file mode 100644 index 000000000..cb6ca80d9 --- /dev/null +++ b/cpp/external/katagocoreml/src/Converter.cpp @@ -0,0 +1,106 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#include "katagocoreml/KataGoConverter.hpp" +#include "parser/KataGoParser.hpp" +#include "builder/MILBuilder.hpp" +#include "serializer/CoreMLSerializer.hpp" +#include <filesystem> +#include <stdexcept> + +namespace katagocoreml { + +void KataGoConverter::convert(const std::string& input_path, + const std::string& output_path, + const ConversionOptions& options) { + // Validate board sizes + if (options.board_x_size < 2 || options.board_x_size > 37) { + throw std::invalid_argument("board_x_size must be in range [2, 37]"); + } + if (options.board_y_size < 2 || options.board_y_size > 37) { + throw std::invalid_argument("board_y_size must be in range [2, 37]"); + } + + // Validate batch sizes + if (options.min_batch_size < 1) { + throw std::invalid_argument("min_batch_size must be at least 1"); + } + if (options.max_batch_size > 0 && options.max_batch_size < options.min_batch_size) { + throw std::invalid_argument("max_batch_size must be >= min_batch_size or <= 0 for unlimited"); + } + + // Parse KataGo model + KataGoParser parser(input_path); + KataGoModelDesc model = parser.parse(); + + // Determine if using FP16 precision + bool use_fp16 = (options.compute_precision == "FLOAT16"); + + // Validate configuration: use_fp16_io requires FP16 compute + if (options.use_fp16_io && !use_fp16) { + throw std::invalid_argument("use_fp16_io requires compute_precision=\"FLOAT16\""); + } + + // Build MIL program
+ MILBuilder builder(model, + options.board_x_size, + options.board_y_size, + options.optimize_identity_mask, + use_fp16, + options.min_batch_size, + options.max_batch_size, + options.use_fp16_io); + auto program = builder.build(); + + // Get weights from builder + auto weights = builder.getWeights(); + std::vector weights_copy(weights.begin(), weights.end()); + + // Update options with model metadata for serialization + ConversionOptions final_options = options; + final_options.model_version = model.model_version; + final_options.meta_encoder_version = model.meta_encoder_version; + final_options.num_input_meta_channels = model.num_input_meta_channels; + final_options.num_input_channels = model.num_input_channels; + final_options.num_input_global_channels = model.num_input_global_channels; + + // Add model architecture info for metadata + final_options.num_blocks = model.trunk.num_blocks; + final_options.trunk_channels = model.trunk.trunk_num_channels; + final_options.model_name = model.name; + + // Extract filename from input path + if (final_options.source_filename.empty()) { + std::filesystem::path p(input_path); + final_options.source_filename = p.filename().string(); + } + + // FLOAT16 I/O requires specification version >= 7 (iOS 16+) + if (final_options.use_fp16_io && final_options.specification_version < 7) { + final_options.specification_version = 7; + } + + // Serialize to .mlpackage + CoreMLSerializer serializer(final_options.specification_version); + serializer.serialize(program.get(), weights_copy, output_path, final_options); +} + +ModelInfo KataGoConverter::getModelInfo(const std::string& input_path) { + KataGoParser parser(input_path); + KataGoModelDesc model = parser.parse(); + + ModelInfo info; + info.name = model.name; + info.version = model.model_version; + info.num_input_channels = model.num_input_channels; + info.num_input_global_channels = model.num_input_global_channels; + info.num_blocks = model.trunk.num_blocks; + info.trunk_channels = 
model.trunk.trunk_num_channels; + info.has_metadata_encoder = model.meta_encoder_version > 0; + info.num_policy_channels = model.num_policy_channels; + info.num_score_value_channels = model.num_score_value_channels; + + return info; +} + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/builder/MILBuilder.cpp b/cpp/external/katagocoreml/src/builder/MILBuilder.cpp new file mode 100644 index 000000000..db0c6c4b1 --- /dev/null +++ b/cpp/external/katagocoreml/src/builder/MILBuilder.cpp @@ -0,0 +1,2126 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#include "MILBuilder.hpp" +#include "MILBlob/Fp16.hpp" +#include + +// Include generated protobuf headers +#include "MIL.pb.h" + +namespace katagocoreml { + +MILBuilder::MILBuilder(const KataGoModelDesc& model, + int board_x_size, + int board_y_size, + bool optimize_identity_mask, + bool use_fp16, + int min_batch_size, + int max_batch_size, + bool use_fp16_io) + : m_model(model) + , m_board_x_size(board_x_size) + , m_board_y_size(board_y_size) + , m_optimize_identity_mask(optimize_identity_mask) + , m_use_fp16(use_fp16) + , m_use_fp16_io(use_fp16_io) + , m_min_batch_size(min_batch_size) + , m_max_batch_size(max_batch_size) + , m_weight_dtype(use_fp16 + ? 
CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32) + , m_ops(board_x_size, board_y_size, optimize_identity_mask) + , m_var_counter(0) {} + +void MILBuilder::setBatchDimension(CoreML::Specification::MILSpec::TensorType* tensor_type) { + auto* dim = tensor_type->add_dimensions(); + if (m_min_batch_size == m_max_batch_size && m_max_batch_size > 0) { + // Fixed batch size + dim->mutable_constant()->set_size(m_min_batch_size); + } else { + // Dynamic batch size - use UnknownDimension + dim->mutable_unknown()->set_variadic(false); + } +} + +std::string MILBuilder::genVarName(const std::string& prefix) { + return prefix + "_" + std::to_string(m_var_counter++); +} + +std::unique_ptr<CoreML::Specification::MILSpec::Program> MILBuilder::build() { + auto program = std::make_unique<CoreML::Specification::MILSpec::Program>(); + program->set_version(1); + + // Create main function + auto& functions = *program->mutable_functions(); + auto& main_func = functions["main"]; + main_func.set_opset("CoreML5"); + + // Create main block + auto& blocks = *main_func.mutable_block_specializations(); + auto& main_block = blocks["CoreML5"]; + + // Define inputs + // spatial_input: [batch, num_input_ch, board_y, board_x] + auto* spatial_input = main_func.add_inputs(); + spatial_input->set_name("spatial_input"); + auto* spatial_type = spatial_input->mutable_type()->mutable_tensortype(); + spatial_type->set_datatype(m_use_fp16 && m_use_fp16_io + ?
CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32); + spatial_type->set_rank(4); + setBatchDimension(spatial_type); + spatial_type->add_dimensions()->mutable_constant()->set_size(m_model.num_input_channels); + spatial_type->add_dimensions()->mutable_constant()->set_size(m_board_y_size); + spatial_type->add_dimensions()->mutable_constant()->set_size(m_board_x_size); + + // global_input: [batch, num_global_ch] + auto* global_input = main_func.add_inputs(); + global_input->set_name("global_input"); + auto* global_type = global_input->mutable_type()->mutable_tensortype(); + global_type->set_datatype(m_use_fp16 && m_use_fp16_io + ? CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32); + global_type->set_rank(2); + setBatchDimension(global_type); + global_type->add_dimensions()->mutable_constant()->set_size(m_model.num_input_global_channels); + + // input_mask: [batch, 1, board_y, board_x] + auto* mask_input = main_func.add_inputs(); + mask_input->set_name("input_mask"); + auto* mask_type = mask_input->mutable_type()->mutable_tensortype(); + mask_type->set_datatype(m_use_fp16 && m_use_fp16_io + ? CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32); + mask_type->set_rank(4); + setBatchDimension(mask_type); + mask_type->add_dimensions()->mutable_constant()->set_size(1); + mask_type->add_dimensions()->mutable_constant()->set_size(m_board_y_size); + mask_type->add_dimensions()->mutable_constant()->set_size(m_board_x_size); + + // Optional meta_input for human SL networks + std::string meta_input_name; + if (m_model.meta_encoder_version > 0 && m_model.num_input_meta_channels > 0) { + auto* meta_input = main_func.add_inputs(); + meta_input->set_name("meta_input"); + auto* meta_type = meta_input->mutable_type()->mutable_tensortype(); + meta_type->set_datatype(m_use_fp16 && m_use_fp16_io + ? 
CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32); + meta_type->set_rank(2); + setBatchDimension(meta_type); + meta_type->add_dimensions()->mutable_constant()->set_size(m_model.num_input_meta_channels); + meta_input_name = "meta_input"; + } + + // For FP16 mode with FP32 I/O, add cast operations after inputs + std::string spatial_name = "spatial_input"; + std::string global_name = "global_input"; + std::string mask_name = "input_mask"; + std::string meta_name = meta_input_name; + + if (m_use_fp16 && !m_use_fp16_io) { + // Cast spatial_input: [1, num_input_ch, H, W] fp32 -> fp16 + addCastOp(&main_block, "spatial_input", "spatial_input_cast_fp16", "fp16", + {1, m_model.num_input_channels, m_board_y_size, m_board_x_size}); + spatial_name = "spatial_input_cast_fp16"; + + // Cast global_input: [1, num_global_ch] fp32 -> fp16 + addCastOp(&main_block, "global_input", "global_input_cast_fp16", "fp16", + {1, m_model.num_input_global_channels}); + global_name = "global_input_cast_fp16"; + + // Cast input_mask: [1, 1, H, W] fp32 -> fp16 + addCastOp(&main_block, "input_mask", "input_mask_cast_fp16", "fp16", + {1, 1, m_board_y_size, m_board_x_size}); + mask_name = "input_mask_cast_fp16"; + + // Cast meta_input if present + if (!meta_input_name.empty()) { + addCastOp(&main_block, "meta_input", "meta_input_cast_fp16", "fp16", + {1, m_model.num_input_meta_channels}); + meta_name = "meta_input_cast_fp16"; + } + } + + // Build the network + const std::string* meta_ptr = meta_name.empty() ? 
nullptr : &meta_name; + std::string trunk_out = buildTrunk(&main_block, spatial_name, global_name, mask_name, meta_ptr); + + // Build heads + std::string policy_out, pass_out; + buildPolicyHead(&main_block, trunk_out, mask_name, policy_out, pass_out); + + std::string value_out, ownership_out, score_value_out; + buildValueHead(&main_block, trunk_out, mask_name, value_out, ownership_out, score_value_out); + + // For FP16 mode with FP32 I/O, add cast operations to convert outputs back to FP32 + std::string final_policy_out = policy_out; + std::string final_pass_out = pass_out; + std::string final_value_out = value_out; + std::string final_ownership_out = ownership_out; + std::string final_score_value_out = score_value_out; + + if (m_use_fp16 && !m_use_fp16_io) { + const auto& ph = m_model.policy_head; + const auto& vh = m_model.value_head; + + // Cast policy_p2_conv: [1, p2_out_channels, H, W] fp16 -> fp32 + final_policy_out = "policy_p2_conv"; + addCastOp(&main_block, policy_out, final_policy_out, "fp32", + {1, ph.p2_conv.out_channels, m_board_y_size, m_board_x_size}); + + // Cast pass output: [1, 2] fp16 -> fp32 + final_pass_out = "policy_pass"; // Python uses policy_pass for all versions + int pass_out_channels = ph.gpool_to_pass_mul2.has_value() + ? 
ph.gpool_to_pass_mul2->out_channels + : ph.gpool_to_pass_mul.out_channels; + addCastOp(&main_block, pass_out, final_pass_out, "fp32", + {1, pass_out_channels}); + + // Cast value_v3_bias: [1, 3] fp16 -> fp32 + final_value_out = "value_v3_bias"; + addCastOp(&main_block, value_out, final_value_out, "fp32", + {1, vh.v3_mul.out_channels}); + + // Cast ownership: [1, 1, H, W] fp16 -> fp32 + final_ownership_out = "value_ownership_conv"; + addCastOp(&main_block, ownership_out, final_ownership_out, "fp32", + {1, vh.v_ownership_conv.out_channels, m_board_y_size, m_board_x_size}); + + // Cast score_value: [1, num_score_value_channels] fp16 -> fp32 + final_score_value_out = "value_sv3_bias"; + addCastOp(&main_block, score_value_out, final_score_value_out, "fp32", + {1, vh.sv3_mul.out_channels}); + } + + // Set block outputs + main_block.add_outputs(final_policy_out); + main_block.add_outputs(final_pass_out); + main_block.add_outputs(final_value_out); + main_block.add_outputs(final_ownership_out); + main_block.add_outputs(final_score_value_out); + + return program; +} + +// ============================================================================ +// MIL Operation Helpers +// ============================================================================ + +void MILBuilder::addConstOp(CoreML::Specification::MILSpec::Block* block, + const std::string& name, + const std::vector& data, + const std::vector& shape) { + // Register weight for blob storage + m_ops.registerWeight(name, data, shape); + + // Add const operation + auto* op = block->add_operations(); + op->set_type("const"); + + // "name" attribute (matching Python structure) + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(name); + + // "val" attribute with type and blob reference + auto& val_attr = 
(*op->mutable_attributes())["val"]; + auto* val_type = val_attr.mutable_type()->mutable_tensortype(); + val_type->set_datatype(m_weight_dtype); + val_type->set_rank(static_cast(shape.size())); + for (int64_t dim : shape) { + val_type->add_dimensions()->mutable_constant()->set_size(dim); + } + auto* blob_val = val_attr.mutable_blobfilevalue(); + blob_val->set_filename("@model_path/weights/weight.bin"); + // Offset will be set during serialization + + // Set output + auto* output = op->add_outputs(); + output->set_name(name); + auto* output_type = output->mutable_type()->mutable_tensortype(); + output_type->set_datatype(m_weight_dtype); + output_type->set_rank(static_cast(shape.size())); + for (int64_t dim : shape) { + output_type->add_dimensions()->mutable_constant()->set_size(dim); + } +} + +// Helper: Add INT32 array const op (for axes, shape) +void MILBuilder::addIntArrayConstOp(CoreML::Specification::MILSpec::Block* block, + const std::string& name, + const std::vector& values) { + auto* op = block->add_operations(); + op->set_type("const"); + + // "name" attribute + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(name); + + // "val" attribute with INT32 type + auto& val_attr = (*op->mutable_attributes())["val"]; + auto* val_type = val_attr.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + val_type->set_rank(1); + val_type->add_dimensions()->mutable_constant()->set_size(static_cast(values.size())); + auto* ints = val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_ints(); + for (int32_t v : values) { + ints->add_values(v); + } + + // Output + auto* output = op->add_outputs(); + output->set_name(name); + auto* out_type = output->mutable_type()->mutable_tensortype(); + 
out_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + out_type->set_rank(1); + out_type->add_dimensions()->mutable_constant()->set_size(static_cast(values.size())); +} + +// Helper: Add BOOL scalar const op (for keep_dims) +void MILBuilder::addBoolScalarConstOp(CoreML::Specification::MILSpec::Block* block, + const std::string& name, + bool value) { + auto* op = block->add_operations(); + op->set_type("const"); + + // "name" attribute + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(name); + + // "val" attribute with BOOL type (rank 0 = scalar) + auto& val_attr = (*op->mutable_attributes())["val"]; + auto* val_type = val_attr.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::BOOL); + val_type->set_rank(0); + val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_bools()->add_values(value); + + // Output + auto* output = op->add_outputs(); + output->set_name(name); + auto* out_type = output->mutable_type()->mutable_tensortype(); + out_type->set_datatype(CoreML::Specification::MILSpec::DataType::BOOL); + out_type->set_rank(0); +} + +// Helper: Add FLOAT32 scalar const op (for y values in sub/mul) +void MILBuilder::addFloatScalarConstOp(CoreML::Specification::MILSpec::Block* block, + const std::string& name, + float value) { + auto* op = block->add_operations(); + op->set_type("const"); + + // "name" attribute + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(name); + + // "val" attribute with appropriate dtype (rank 0 = scalar) + auto& val_attr = 
(*op->mutable_attributes())["val"]; + auto* val_type = val_attr.mutable_type()->mutable_tensortype(); + val_type->set_datatype(m_weight_dtype); + val_type->set_rank(0); + + if (m_use_fp16) { + // For FP16, use bytes storage with FP16 representation + MILBlob::Fp16 fp16_val = MILBlob::Fp16::FromFloat(value); + std::string bytes_data(reinterpret_cast(&fp16_val.bytes), sizeof(fp16_val.bytes)); + val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_bytes()->set_values(bytes_data); + } else { + // For FP32, use floats storage + val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_floats()->add_values(value); + } + + // Output + auto* output = op->add_outputs(); + output->set_name(name); + auto* out_type = output->mutable_type()->mutable_tensortype(); + out_type->set_datatype(m_weight_dtype); + out_type->set_rank(0); +} + +// Helper: Add INT32 scalar const op (for concat axis) +void MILBuilder::addIntScalarConstOp(CoreML::Specification::MILSpec::Block* block, + const std::string& name, + int32_t value) { + auto* op = block->add_operations(); + op->set_type("const"); + + // "name" attribute + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(name); + + // "val" attribute with INT32 type (rank 0 = scalar) + auto& val_attr = (*op->mutable_attributes())["val"]; + auto* val_type = val_attr.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + val_type->set_rank(0); + val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_ints()->add_values(value); + + // Output + auto* output = op->add_outputs(); + output->set_name(name); + auto* out_type = output->mutable_type()->mutable_tensortype(); + out_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + out_type->set_rank(0); +} + 
+// Helper: Add cast operation for dtype conversion +void MILBuilder::addCastOp(CoreML::Specification::MILSpec::Block* block, + const std::string& input, + const std::string& output, + const std::string& dtype, + const std::vector& shape) { + // Create dtype const (STRING type) + std::string dtype_name = output + "_dtype_0"; + { + auto* op = block->add_operations(); + op->set_type("const"); + + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(dtype_name); + + auto& val_attr = (*op->mutable_attributes())["val"]; + val_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + val_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(dtype); + + auto* out = op->add_outputs(); + out->set_name(dtype_name); + out->mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + } + + // Create cast operation + auto* op = block->add_operations(); + op->set_type("cast"); + + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(input); + inputs["dtype"].add_arguments()->set_name(dtype_name); + + // Set output with target dtype + auto* out = op->add_outputs(); + out->set_name(output); + auto* tt = out->mutable_type()->mutable_tensortype(); + tt->set_datatype(dtype == "fp16" + ? 
CoreML::Specification::MILSpec::DataType::FLOAT16 + : CoreML::Specification::MILSpec::DataType::FLOAT32); + tt->set_rank(static_cast(shape.size())); + // First dimension is batch - use setBatchDimension + setBatchDimension(tt); + // Remaining dimensions are constant + for (size_t i = 1; i < shape.size(); i++) { + tt->add_dimensions()->mutable_constant()->set_size(shape[i]); + } +} + +void MILBuilder::addConvOp(CoreML::Specification::MILSpec::Block* block, + const std::string& input, + const ConvLayerDesc& layer, + const std::string& output) { + // Create const operations for all parameters (matching Python structure) + std::string weight_name = output + "_weight_0"; + std::string pad_type_name = output + "_pad_type_0"; + std::string dilations_name = output + "_dilations_0"; + std::string strides_name = output + "_strides_0"; + std::string groups_name = output + "_groups_0"; + std::string pad_name = output + "_pad_0"; + + // Add weight constant + addConstOp(block, weight_name, layer.weights, layer.getWeightShape()); + + // Add pad_type constant ("same") - STRING type + { + auto* const_op = block->add_operations(); + const_op->set_type("const"); + // "name" attribute + auto& name_attr = (*const_op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(pad_type_name); + // "val" attribute with type + auto& val = (*const_op->mutable_attributes())["val"]; + val.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + val.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values("same"); + // Output + auto* out = const_op->add_outputs(); + out->set_name(pad_type_name); + out->mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + } + + // Add dilations constant - INT32 type, 
shape [2] + { + auto* const_op = block->add_operations(); + const_op->set_type("const"); + // "name" attribute + auto& name_attr = (*const_op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(dilations_name); + // "val" attribute with type + auto& val = (*const_op->mutable_attributes())["val"]; + auto* val_type = val.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + val_type->set_rank(1); + val_type->add_dimensions()->mutable_constant()->set_size(2); + auto* int_vals = val.mutable_immediatevalue()->mutable_tensor()->mutable_ints(); + int_vals->add_values(layer.dilation_y); + int_vals->add_values(layer.dilation_x); + // Output + auto* out = const_op->add_outputs(); + out->set_name(dilations_name); + auto* tt = out->mutable_type()->mutable_tensortype(); + tt->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + tt->set_rank(1); + tt->add_dimensions()->mutable_constant()->set_size(2); + } + + // Add strides constant - INT32 type, shape [2] + { + auto* const_op = block->add_operations(); + const_op->set_type("const"); + // "name" attribute + auto& name_attr = (*const_op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(strides_name); + // "val" attribute with type + auto& val = (*const_op->mutable_attributes())["val"]; + auto* val_type = val.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + val_type->set_rank(1); + val_type->add_dimensions()->mutable_constant()->set_size(2); + auto* int_vals = val.mutable_immediatevalue()->mutable_tensor()->mutable_ints(); + 
int_vals->add_values(1); + int_vals->add_values(1); + // Output + auto* out = const_op->add_outputs(); + out->set_name(strides_name); + auto* tt = out->mutable_type()->mutable_tensortype(); + tt->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + tt->set_rank(1); + tt->add_dimensions()->mutable_constant()->set_size(2); + } + + // Add groups constant (always 1 for standard convolution) - INT32 scalar type + { + auto* const_op = block->add_operations(); + const_op->set_type("const"); + // "name" attribute + auto& name_attr = (*const_op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(groups_name); + // "val" attribute with type (scalar) + auto& val = (*const_op->mutable_attributes())["val"]; + val.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::INT32); + val.mutable_immediatevalue()->mutable_tensor()->mutable_ints()->add_values(1); + // Output + auto* out = const_op->add_outputs(); + out->set_name(groups_name); + out->mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::INT32); + } + + // Add pad constant [0, 0, 0, 0] for "same" padding - INT32 type, shape [4] + { + auto* const_op = block->add_operations(); + const_op->set_type("const"); + // "name" attribute + auto& name_attr = (*const_op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(pad_name); + // "val" attribute with type + auto& val = (*const_op->mutable_attributes())["val"]; + auto* val_type = val.mutable_type()->mutable_tensortype(); + val_type->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + val_type->set_rank(1); + 
val_type->add_dimensions()->mutable_constant()->set_size(4); + auto* int_vals = val.mutable_immediatevalue()->mutable_tensor()->mutable_ints(); + int_vals->add_values(0); + int_vals->add_values(0); + int_vals->add_values(0); + int_vals->add_values(0); + // Output + auto* out = const_op->add_outputs(); + out->set_name(pad_name); + auto* tt = out->mutable_type()->mutable_tensortype(); + tt->set_datatype(CoreML::Specification::MILSpec::DataType::INT32); + tt->set_rank(1); + tt->add_dimensions()->mutable_constant()->set_size(4); + } + + // Add conv operation referencing all const parameters + auto* op = block->add_operations(); + op->set_type("conv"); + + // Inputs - reference const operations + auto& inputs = *op->mutable_inputs(); + inputs["dilations"].add_arguments()->set_name(dilations_name); + inputs["groups"].add_arguments()->set_name(groups_name); + inputs["pad"].add_arguments()->set_name(pad_name); + inputs["pad_type"].add_arguments()->set_name(pad_type_name); + inputs["strides"].add_arguments()->set_name(strides_name); + inputs["weight"].add_arguments()->set_name(weight_name); + inputs["x"].add_arguments()->set_name(input); + + // Output with dimensions [batch, out_channels, height, width] + auto* out = op->add_outputs(); + out->set_name(output); + auto* out_type = out->mutable_type()->mutable_tensortype(); + out_type->set_datatype(m_weight_dtype); + out_type->set_rank(4); + setBatchDimension(out_type); + out_type->add_dimensions()->mutable_constant()->set_size(layer.out_channels); + out_type->add_dimensions()->mutable_constant()->set_size(m_board_y_size); + out_type->add_dimensions()->mutable_constant()->set_size(m_board_x_size); +} + +// Helper: Set output tensor type with 4D shape [batch, C, H, W] +void MILBuilder::setTensorOutput4D(CoreML::Specification::MILSpec::Operation* op, + const std::string& name, + int channels, int height, int width) { + auto* out = op->add_outputs(); + out->set_name(name); + auto* tt = out->mutable_type()->mutable_tensortype(); + 
  tt->set_datatype(m_weight_dtype);
  tt->set_rank(4);
  setBatchDimension(tt);
  tt->add_dimensions()->mutable_constant()->set_size(channels);
  tt->add_dimensions()->mutable_constant()->set_size(height);
  tt->add_dimensions()->mutable_constant()->set_size(width);
}

// Helper: Set output tensor type with 2D shape [batch, C]
// Appends a named output to `op`, typed as a rank-2 tensor in the model's
// weight dtype; the leading batch dimension is set via setBatchDimension.
void MILBuilder::setTensorOutput2D(CoreML::Specification::MILSpec::Operation* op,
                                   const std::string& name,
                                   int channels) {
  auto* out = op->add_outputs();
  out->set_name(name);
  auto* tt = out->mutable_type()->mutable_tensortype();
  tt->set_datatype(m_weight_dtype);  // same precision as the model weights
  tt->set_rank(2);
  setBatchDimension(tt);
  tt->add_dimensions()->mutable_constant()->set_size(channels);
}

// Helper: Set output tensor type with 4D shape [batch, C, 1, 1] for pooled results
// Used for outputs of reduce_sum/reduce_max with keep_dims=true over axes {2, 3}.
void MILBuilder::setTensorOutputPooled4D(CoreML::Specification::MILSpec::Operation* op,
                                         const std::string& name,
                                         int channels) {
  auto* out = op->add_outputs();
  out->set_name(name);
  auto* tt = out->mutable_type()->mutable_tensortype();
  tt->set_datatype(m_weight_dtype);
  tt->set_rank(4);
  setBatchDimension(tt);
  tt->add_dimensions()->mutable_constant()->set_size(channels);
  tt->add_dimensions()->mutable_constant()->set_size(1);
  tt->add_dimensions()->mutable_constant()->set_size(1);
}

// Helper: Set output tensor type with 4D shape [batch, 1, 1, 1] (for mask operations)
// Used for per-batch scalars derived from the mask (e.g. mask sums and factors).
void MILBuilder::setTensorOutputMask4D(CoreML::Specification::MILSpec::Operation* op,
                                       const std::string& name) {
  auto* out = op->add_outputs();
  out->set_name(name);
  auto* tt = out->mutable_type()->mutable_tensortype();
  tt->set_datatype(m_weight_dtype);
  tt->set_rank(4);
  setBatchDimension(tt);
  tt->add_dimensions()->mutable_constant()->set_size(1);
  tt->add_dimensions()->mutable_constant()->set_size(1);
  tt->add_dimensions()->mutable_constant()->set_size(1);
}

// Helper: Set output tensor type with 4D shape [batch, 1, H, W] (for mask spatial operations)
void
MILBuilder::setTensorOutputMaskSpatial4D(CoreML::Specification::MILSpec::Operation* op,
                                         const std::string& name,
                                         int height, int width) {
  // Single-channel spatial tensor: broadcast-compatible with [batch, C, H, W].
  auto* out = op->add_outputs();
  out->set_name(name);
  auto* tt = out->mutable_type()->mutable_tensortype();
  tt->set_datatype(m_weight_dtype);
  tt->set_rank(4);
  setBatchDimension(tt);
  tt->add_dimensions()->mutable_constant()->set_size(1);
  tt->add_dimensions()->mutable_constant()->set_size(height);
  tt->add_dimensions()->mutable_constant()->set_size(width);
}

// Emits batch-norm (folded to x * merged_scale + merged_bias), optional
// re-masking, and the layer activation as a chain of MIL ops.
// `bn` supplies the pre-merged scale/bias constants; `mask` is the name of
// the [batch, 1, H, W] board mask tensor; the final result is named `output`.
void MILBuilder::addBatchNormActivationOps(CoreML::Specification::MILSpec::Block* block,
                                           const std::string& input,
                                           const BatchNormLayerDesc& bn,
                                           const ActivationLayerDesc& act,
                                           const std::string& mask,
                                           const std::string& output) {
  // BN: x * scale + bias
  std::string scale_name = output + "_bn_scale";
  std::string bias_name = output + "_bn_bias";

  // Reshape scale/bias to [1, C, 1, 1]
  // NOTE(review): template arguments were lost in extraction; int64_t assumed
  // for shape vectors — confirm against addConstOp's declared shape type.
  std::vector<int64_t> bn_shape = {1, static_cast<int64_t>(bn.num_channels), 1, 1};
  addConstOp(block, scale_name, bn.merged_scale, bn_shape);
  addConstOp(block, bias_name, bn.merged_bias, bn_shape);

  // Mul: x * scale
  std::string scaled_name = output + "_scaled";
  {
    auto* op = block->add_operations();
    op->set_type("mul");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(input);
    inputs["y"].add_arguments()->set_name(scale_name);
    setTensorOutput4D(op, scaled_name, bn.num_channels, m_board_y_size, m_board_x_size);
  }

  // Add: scaled + bias
  std::string biased_name = output + "_biased";
  {
    auto* op = block->add_operations();
    op->set_type("add");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(scaled_name);
    inputs["y"].add_arguments()->set_name(bias_name);
    setTensorOutput4D(op, biased_name, bn.num_channels, m_board_y_size, m_board_x_size);
  }

  std::string bn_output = biased_name;

  // Apply mask if not optimizing
  // (when m_optimize_identity_mask is set, the mask is all-ones and the
  // multiply is skipped entirely)
  if (!m_optimize_identity_mask) {
    std::string masked_name = output
      + "_masked";
    auto* op = block->add_operations();
    op->set_type("mul");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(bn_output);
    inputs["y"].add_arguments()->set_name(mask);
    setTensorOutput4D(op, masked_name, bn.num_channels, m_board_y_size, m_board_x_size);
    bn_output = masked_name;
  }

  // Activation
  if (act.activation_type == ActivationType::Identity) {
    // Identity: just rename
    // In MIL we need to copy (there is no pure aliasing of a named value)
    auto* op = block->add_operations();
    op->set_type("identity");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(bn_output);
    setTensorOutput4D(op, output, bn.num_channels, m_board_y_size, m_board_x_size);
  } else if (act.activation_type == ActivationType::ReLU) {
    auto* op = block->add_operations();
    op->set_type("relu");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(bn_output);
    setTensorOutput4D(op, output, bn.num_channels, m_board_y_size, m_board_x_size);
  } else if (act.activation_type == ActivationType::Mish) {
    addMishOps(block, bn_output, output, 4, bn.num_channels);
  }
}

// Emits the Mish activation as elementary MIL ops.
// Mish(x) = x * tanh(softplus(x)); with e = exp(x) this is algebraically
// equal to the form built below:
//   Mish: x / (1 + 2 / (e * (e + 2)))
//   e = exp(x)
//
// rank and channels are used to set output type info:
// - rank=4: spatial tensors [1, C, H, W] (uses m_board_y_size, m_board_x_size)
// - rank=2: vector tensors [1, C]
void MILBuilder::addMishOps(CoreML::Specification::MILSpec::Block* block,
                            const std::string& input,
                            const std::string& output,
                            int rank,
                            int channels) {
  auto setOutputType = [this, rank, channels](CoreML::Specification::MILSpec::Operation* op, const std::string& name) {
    auto* out = op->add_outputs();
    out->set_name(name);
    auto* out_type = out->mutable_type()->mutable_tensortype();
    out_type->set_datatype(m_weight_dtype);
    out_type->set_rank(rank);
    setBatchDimension(out_type);
    out_type->add_dimensions()->mutable_constant()->set_size(channels);
    if (rank == 4) {
      out_type->add_dimensions()->mutable_constant()->set_size(m_board_y_size);
      out_type->add_dimensions()->mutable_constant()->set_size(m_board_x_size);
    }
  };

  // Intermediate value names, suffixed off `output` to keep them unique.
  std::string e = output + "_exp";
  std::string ep2 = output + "_ep2";
  std::string emep2 = output + "_emep2";
  std::string tdemep2 = output + "_tdemep2";
  std::string optdemep2 = output + "_optdemep2";

  // Create scalar constants for Mish computation
  std::string const_one = output + "_const_1";
  std::string const_two = output + "_const_2";
  addFloatScalarConstOp(block, const_one, 1.0f);
  addFloatScalarConstOp(block, const_two, 2.0f);

  // e = exp(x)
  {
    auto* op = block->add_operations();
    op->set_type("exp");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(input);
    setOutputType(op, e);
  }

  // ep2 = e + 2
  {
    auto* op = block->add_operations();
    op->set_type("add");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(e);
    inputs["y"].add_arguments()->set_name(const_two);
    setOutputType(op, ep2);
  }

  // emep2 = e * ep2
  {
    auto* op = block->add_operations();
    op->set_type("mul");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(e);
    inputs["y"].add_arguments()->set_name(ep2);
    setOutputType(op, emep2);
  }

  // tdemep2 = 2 / emep2
  {
    auto* op = block->add_operations();
    op->set_type("real_div");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(const_two);
    inputs["y"].add_arguments()->set_name(emep2);
    setOutputType(op, tdemep2);
  }

  // optdemep2 = 1 + tdemep2
  {
    auto* op = block->add_operations();
    op->set_type("add");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(const_one);
    inputs["y"].add_arguments()->set_name(tdemep2);
    setOutputType(op, optdemep2);
  }

  // output = x / optdemep2
  {
    auto* op = block->add_operations();
    op->set_type("real_div");
    auto& inputs = *op->mutable_inputs();
    inputs["x"].add_arguments()->set_name(input);
    inputs["y"].add_arguments()->set_name(optdemep2);
    setOutputType(op, output);
  }
}

// Emits a MIL "matmul" for a KataGo fully-connected layer (no bias).
// The weight is stored as a const op; transpose_x/transpose_y are emitted as
// BOOL const ops set to false, mirroring the structure coremltools' Python
// converter produces. Output shape is [batch, layer.out_channels].
void MILBuilder::addMatMulOp(CoreML::Specification::MILSpec::Block* block,
                             const std::string& input,
                             const MatMulLayerDesc& layer,
                             const std::string& output) {
  // Create const operations for all parameters (matching Python structure)
  std::string weight_name = output + "_y_0";
  std::string transpose_x_name = output + "_transpose_x_0";
  std::string transpose_y_name = output + "_transpose_y_0";

  // Add weight constant
  addConstOp(block, weight_name, layer.weights, layer.getWeightShape());

  // Add transpose_x constant (false) - BOOL type
  {
    auto* const_op = block->add_operations();
    const_op->set_type("const");
    // "name" attribute
    auto& name_attr = (*const_op->mutable_attributes())["name"];
    name_attr.mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::STRING);
    name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(transpose_x_name);
    // "val" attribute with type
    auto& val = (*const_op->mutable_attributes())["val"];
    val.mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::BOOL);
    val.mutable_immediatevalue()->mutable_tensor()->mutable_bools()->add_values(false);
    // Output
    auto* out = const_op->add_outputs();
    out->set_name(transpose_x_name);
    out->mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::BOOL);
  }

  // Add transpose_y constant (false) - BOOL type
  {
    auto* const_op = block->add_operations();
    const_op->set_type("const");
    // "name" attribute
    auto& name_attr = (*const_op->mutable_attributes())["name"];
    name_attr.mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::STRING);
    name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(transpose_y_name);
    // "val" attribute with type
    auto& val = (*const_op->mutable_attributes())["val"];
    val.mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::BOOL);
    val.mutable_immediatevalue()->mutable_tensor()->mutable_bools()->add_values(false);
    // Output
    auto* out = const_op->add_outputs();
    out->set_name(transpose_y_name);
    out->mutable_type()->mutable_tensortype()->set_datatype(
      CoreML::Specification::MILSpec::DataType::BOOL);
  }

  // Add matmul operation
  auto* op = block->add_operations();
  op->set_type("matmul");
  auto& inputs = *op->mutable_inputs();
  inputs["transpose_x"].add_arguments()->set_name(transpose_x_name);
  inputs["transpose_y"].add_arguments()->set_name(transpose_y_name);
  inputs["x"].add_arguments()->set_name(input);
  inputs["y"].add_arguments()->set_name(weight_name);

  // Output with 2D shape [batch, out_channels]
  auto* out = op->add_outputs();
  out->set_name(output);
  auto* out_type = out->mutable_type()->mutable_tensortype();
  out_type->set_datatype(m_weight_dtype);
  out_type->set_rank(2);
  setBatchDimension(out_type);
  out_type->add_dimensions()->mutable_constant()->set_size(layer.out_channels);
}

// Emits a broadcast "add" of a [num_channels] bias constant onto a
// [batch, num_channels] activation (follows a matmul/linear output).
void MILBuilder::addMatBiasOp(CoreML::Specification::MILSpec::Block* block,
                              const std::string& input,
                              const MatBiasLayerDesc& layer,
                              const std::string& output) {
  // Add bias constant
  std::string bias_name = output + "_bias";
  // NOTE(review): template arguments were lost in extraction; int64_t assumed
  // for the shape vector — confirm against addConstOp's declared shape type.
  std::vector<int64_t> shape = {static_cast<int64_t>(layer.num_channels)};
  addConstOp(block, bias_name, layer.weights, shape);

  // Add add operation
  auto* op = block->add_operations();
  op->set_type("add");
  auto& inputs = *op->mutable_inputs();
  inputs["x"].add_arguments()->set_name(input);
  inputs["y"].add_arguments()->set_name(bias_name);

  // Output with 2D shape [batch, num_channels] (same as matmul output)
  auto* out = op->add_outputs();
  out->set_name(output);
  auto* out_type = out->mutable_type()->mutable_tensortype();
  out_type->set_datatype(m_weight_dtype);
  out_type->set_rank(2);
  setBatchDimension(out_type);
  out_type->add_dimensions()->mutable_constant()->set_size(layer.num_channels);
}

// Emits a fused MIL "linear" op (matmul + bias in one) for a KataGo
// matmul layer followed by a matbias layer. Output: [batch, out_channels].
void MILBuilder::addLinearOp(CoreML::Specification::MILSpec::Block* block,
                             const std::string& input,
                             const MatMulLayerDesc& matmul,
                             const MatBiasLayerDesc& bias,
                             const std::string& output) {
  // Create const operations for weight and bias (matching Python's linear op structure)
  // Core ML linear expects weights in [out_channels, in_channels] format
  // KataGo matmul stores weights in [in_channels, out_channels] format
  // We need to transpose the weights to match Python's fuse_matmul_weight_bias pass
  std::string weight_name = output + "_weight_0";
  std::string bias_name = output + "_bias_0";

  // Transpose weights from [in_channels, out_channels] to [out_channels, in_channels]
  const int in_ch = matmul.in_channels;
  const int out_ch = matmul.out_channels;
  // NOTE(review): template arguments were lost in extraction; float assumed
  // for the weight buffer and int64_t for shapes — confirm against the
  // MatMulLayerDesc::weights element type and addConstOp's signature.
  std::vector<float> transposed_weights(matmul.weights.size());
  for (int i = 0; i < in_ch; ++i) {
    for (int j = 0; j < out_ch; ++j) {
      // Original: weights[i * out_ch + j] (row-major [in_ch, out_ch])
      // Transposed: weights[j * in_ch + i] (row-major [out_ch, in_ch])
      transposed_weights[j * in_ch + i] = matmul.weights[i * out_ch + j];
    }
  }

  // Add transposed weight constant with shape [out_channels, in_channels]
  std::vector<int64_t> transposed_shape = {static_cast<int64_t>(out_ch), static_cast<int64_t>(in_ch)};
  addConstOp(block, weight_name, transposed_weights, transposed_shape);

  // Add bias constant
  std::vector<int64_t> bias_shape = {static_cast<int64_t>(bias.num_channels)};
  addConstOp(block, bias_name, bias.weights, bias_shape);

  // Add linear operation
  auto* op = block->add_operations();
  op->set_type("linear");
  auto& inputs = *op->mutable_inputs();
  inputs["x"].add_arguments()->set_name(input);
  inputs["weight"].add_arguments()->set_name(weight_name);
  inputs["bias"].add_arguments()->set_name(bias_name);

  // Output with 2D shape [batch, out_channels]
  auto* out = op->add_outputs();
  out->set_name(output);
  auto* out_type = out->mutable_type()->mutable_tensortype();
  out_type->set_datatype(m_weight_dtype);
  out_type->set_rank(2);
  setBatchDimension(out_type);
  out_type->add_dimensions()->mutable_constant()->set_size(matmul.out_channels);
}

// Emits KataGo's policy-side global pooling over the board mask.
// KataGo global pooling produces: [mean, mean_scaled, max]
// mean_scaled = mean * (sqrt(count) - 14) * 0.1
// where count is the number of on-board points (sum of the mask).
// The three pooled vectors are squeezed to [batch, C] and concatenated into
// a [batch, 3*C] tensor named `output`.
void MILBuilder::addGlobalPoolingOps(CoreML::Specification::MILSpec::Block* block,
                                     const std::string& input,
                                     const std::string& mask,
                                     int channels,
                                     const std::string& output) {
  if (m_optimize_identity_mask) {
    // Optimized path: the mask is known to be all ones, so count and the
    // derived scaling factors are compile-time constants from getMaskConstants().
    const auto& mc = m_ops.getMaskConstants();

    // Mean pooling: sum / count
    std::string sum_name = output + "_sum";
    std::string sum_axes = sum_name + "_axes_0";
    std::string sum_keep_dims = sum_name + "_keep_dims_0";
    addIntArrayConstOp(block, sum_axes, {2, 3});
    addBoolScalarConstOp(block, sum_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_sum");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(input);
      inputs["axes"].add_arguments()->set_name(sum_axes);
      inputs["keep_dims"].add_arguments()->set_name(sum_keep_dims);
      setTensorOutputPooled4D(op, sum_name, channels);
    }

    // mean = sum * (1 / count), with the reciprocal precomputed
    std::string mean_name = output + "_mean";
    std::string mean_y = mean_name + "_y_0";
    addFloatScalarConstOp(block, mean_y, mc.mask_sum_reciprocal);
    {
      auto* op = block->add_operations();
      op->set_type("mul");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(sum_name);
      inputs["y"].add_arguments()->set_name(mean_y);
      setTensorOutputPooled4D(op, mean_name, channels);
    }

    // Max pooling
    std::string max_name = output + "_max";
    std::string max_axes = max_name + "_axes_0";
    std::string max_keep_dims = max_name + "_keep_dims_0";
    addIntArrayConstOp(block, max_axes, {2, 3});
    addBoolScalarConstOp(block, max_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_max");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(input);
      inputs["axes"].add_arguments()->set_name(max_axes);
      inputs["keep_dims"].add_arguments()->set_name(max_keep_dims);
      setTensorOutputPooled4D(op, max_name, channels);
    }

    // Mean scaled = mean * constant (precomputed (sqrt(count) - 14) * 0.1)
    std::string mean_scaled_name = output + "_mean_scaled";
    std::string mean_scaled_y = mean_scaled_name + "_y_0";
    addFloatScalarConstOp(block, mean_scaled_y, mc.mask_sum_sqrt_s14_m01);
    {
      auto* op = block->add_operations();
      op->set_type("mul");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_name);
      inputs["y"].add_arguments()->set_name(mean_scaled_y);
      setTensorOutputPooled4D(op, mean_scaled_name, channels);
    }

    // Squeeze spatial dimensions: [N, C, 1, 1] -> [N, C]
    std::string mean_flat = output + "_mean_flat";
    std::string mean_flat_axes = mean_flat + "_axes_0";
    addIntArrayConstOp(block, mean_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_name);
      inputs["axes"].add_arguments()->set_name(mean_flat_axes);
      setTensorOutput2D(op, mean_flat, channels);
    }

    std::string mean_scaled_flat = output + "_mean_scaled_flat";
    std::string mean_scaled_flat_axes = mean_scaled_flat + "_axes_0";
    addIntArrayConstOp(block, mean_scaled_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_scaled_name);
      inputs["axes"].add_arguments()->set_name(mean_scaled_flat_axes);
      setTensorOutput2D(op, mean_scaled_flat, channels);
    }

    std::string max_flat = output + "_max_flat";
    std::string max_flat_axes = max_flat + "_axes_0";
    addIntArrayConstOp(block, max_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(max_name);
      inputs["axes"].add_arguments()->set_name(max_flat_axes);
      setTensorOutput2D(op, max_flat, channels);
    }

    // Concatenate: [mean, mean_scaled, max]
    std::string concat_axis = output + "_concat_axis_0";
    std::string concat_interleave = output + "_concat_interleave_0";
    addIntScalarConstOp(block, concat_axis, 1);
    addBoolScalarConstOp(block, concat_interleave, false);
    {
      auto* op = block->add_operations();
      op->set_type("concat");
      auto& inputs = *op->mutable_inputs();
      inputs["values"].add_arguments()->set_name(mean_flat);
      inputs["values"].add_arguments()->set_name(mean_scaled_flat);
      inputs["values"].add_arguments()->set_name(max_flat);
      inputs["axis"].add_arguments()->set_name(concat_axis);
      inputs["interleave"].add_arguments()->set_name(concat_interleave);
      setTensorOutput2D(op, output, channels * 3);
    }
  } else {
    // Full path with mask operations
    // Count valid positions (mask is [1, 1, H, W], output is [1, 1, 1, 1])
    std::string mask_sum_name = output + "_mask_sum";
    std::string mask_sum_axes = mask_sum_name + "_axes_0";
    std::string mask_sum_keep_dims = mask_sum_name + "_keep_dims_0";
    addIntArrayConstOp(block, mask_sum_axes, {2, 3});
    addBoolScalarConstOp(block, mask_sum_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_sum");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mask);
      inputs["axes"].add_arguments()->set_name(mask_sum_axes);
      inputs["keep_dims"].add_arguments()->set_name(mask_sum_keep_dims);
      setTensorOutputMask4D(op, mask_sum_name);
    }

    // Masked input: [1, C, H, W] * [1, 1, H, W] -> [1, C, H, W]
    std::string masked_name = output + "_masked";
    {
      auto* op = block->add_operations();
      op->set_type("mul");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(input);
      inputs["y"].add_arguments()->set_name(mask);
      setTensorOutput4D(op, masked_name, channels, m_board_y_size, m_board_x_size);
    }

    // Sum masked values: [1, C, H, W] -> [1, C, 1, 1]
    std::string sum_name = output + "_sum";
    std::string sum_axes = sum_name + "_axes_0";
    std::string sum_keep_dims = sum_name + "_keep_dims_0";
    addIntArrayConstOp(block, sum_axes, {2, 3});
    addBoolScalarConstOp(block, sum_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_sum");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(masked_name);
      inputs["axes"].add_arguments()->set_name(sum_axes);
      inputs["keep_dims"].add_arguments()->set_name(sum_keep_dims);
      setTensorOutputPooled4D(op, sum_name, channels);
    }

    // Mean = sum / count: [1, C, 1, 1] / [1, 1, 1, 1] -> [1, C, 1, 1]
    std::string mean_name = output + "_mean";
    {
      auto* op = block->add_operations();
      op->set_type("real_div");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(sum_name);
      inputs["y"].add_arguments()->set_name(mask_sum_name);
      setTensorOutputPooled4D(op, mean_name, channels);
    }

    // Max pooling (with mask adjustment)
    // mask_minus_one: [1, 1, H, W] - scalar -> [1, 1, H, W]
    // NOTE(review): off-board cells end up at -1 before reduce_max, so this
    // assumes the pooled activations are >= -1 (true after ReLU/Mish) — confirm.
    std::string mask_minus_one = output + "_mask_minus_one";
    std::string mask_minus_one_y = mask_minus_one + "_y_0";
    addFloatScalarConstOp(block, mask_minus_one_y, 1.0f);
    {
      auto* op = block->add_operations();
      op->set_type("sub");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mask);
      inputs["y"].add_arguments()->set_name(mask_minus_one_y);
      setTensorOutputMaskSpatial4D(op, mask_minus_one, m_board_y_size, m_board_x_size);
    }

    // x_for_max: [1, C, H, W] + [1, 1, H, W] -> [1, C, H, W]
    std::string x_for_max = output + "_x_for_max";
    {
      auto* op = block->add_operations();
      op->set_type("add");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(masked_name);
      inputs["y"].add_arguments()->set_name(mask_minus_one);
      setTensorOutput4D(op, x_for_max, channels, m_board_y_size, m_board_x_size);
    }

    // max: [1, C, H, W] -> [1, C, 1, 1]
    std::string max_name = output + "_max";
    std::string max_axes = max_name + "_axes_0";
    std::string max_keep_dims = max_name + "_keep_dims_0";
    addIntArrayConstOp(block, max_axes, {2, 3});
    addBoolScalarConstOp(block, max_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_max");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(x_for_max);
      inputs["axes"].add_arguments()->set_name(max_axes);
      inputs["keep_dims"].add_arguments()->set_name(max_keep_dims);
      setTensorOutputPooled4D(op, max_name, channels);
    }

    // Mean scaled = mean * (sqrt(count) - 14) * 0.1
    // sqrt_mask_sum: [1, 1, 1, 1] -> [1, 1, 1, 1]
    std::string sqrt_mask_sum = output + "_sqrt_mask_sum";
    {
      auto* op = block->add_operations();
      op->set_type("sqrt");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mask_sum_name);
      setTensorOutputMask4D(op, sqrt_mask_sum);
    }

    // sqrt_m14: [1, 1, 1, 1] - scalar -> [1, 1, 1, 1]
    std::string sqrt_m14 = output + "_sqrt_m14";
    std::string sqrt_m14_y = sqrt_m14 + "_y_0";
    addFloatScalarConstOp(block, sqrt_m14_y, 14.0f);
    {
      auto* op = block->add_operations();
      op->set_type("sub");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(sqrt_mask_sum);
      inputs["y"].add_arguments()->set_name(sqrt_m14_y);
      setTensorOutputMask4D(op, sqrt_m14);
    }

    // scaled_factor: [1, 1, 1, 1] * scalar -> [1, 1, 1, 1]
    std::string scaled_factor = output + "_scaled_factor";
    std::string scaled_factor_y = scaled_factor + "_y_0";
    addFloatScalarConstOp(block, scaled_factor_y, 0.1f);
    {
      auto* op = block->add_operations();
      op->set_type("mul");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(sqrt_m14);
      inputs["y"].add_arguments()->set_name(scaled_factor_y);
      setTensorOutputMask4D(op, scaled_factor);
    }

    // mean_scaled: [1, C, 1, 1] * [1, 1, 1, 1] -> [1, C, 1, 1]
    std::string mean_scaled = output + "_mean_scaled";
    {
      auto* op = block->add_operations();
      op->set_type("mul");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_name);
      inputs["y"].add_arguments()->set_name(scaled_factor);
      setTensorOutputPooled4D(op, mean_scaled, channels);
    }

    // Squeeze spatial dimensions: [1, C, 1, 1] -> [1, C]
    std::string mean_flat = output + "_mean_flat";
    std::string mean_flat_axes = mean_flat + "_axes_0";
    addIntArrayConstOp(block, mean_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_name);
      inputs["axes"].add_arguments()->set_name(mean_flat_axes);
      setTensorOutput2D(op, mean_flat, channels);
    }

    std::string mean_scaled_flat = output + "_mean_scaled_flat";
    std::string mean_scaled_flat_axes = mean_scaled_flat + "_axes_0";
    addIntArrayConstOp(block, mean_scaled_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(mean_scaled);
      inputs["axes"].add_arguments()->set_name(mean_scaled_flat_axes);
      setTensorOutput2D(op, mean_scaled_flat, channels);
    }

    std::string max_flat = output + "_max_flat";
    std::string max_flat_axes = max_flat + "_axes_0";
    addIntArrayConstOp(block, max_flat_axes, {2, 3});
    {
      auto* op = block->add_operations();
      op->set_type("squeeze");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(max_name);
      inputs["axes"].add_arguments()->set_name(max_flat_axes);
      setTensorOutput2D(op, max_flat, channels);
    }

    // Concatenate: [mean, mean_scaled, max] -> [1, 3*C]
    std::string concat_axis = output + "_concat_axis_0";
    std::string concat_interleave = output + "_concat_interleave_0";
    addIntScalarConstOp(block, concat_axis, 1);
    addBoolScalarConstOp(block, concat_interleave, false);
    {
      auto* op = block->add_operations();
      op->set_type("concat");
      auto& inputs = *op->mutable_inputs();
      inputs["values"].add_arguments()->set_name(mean_flat);
      inputs["values"].add_arguments()->set_name(mean_scaled_flat);
      inputs["values"].add_arguments()->set_name(max_flat);
      inputs["axis"].add_arguments()->set_name(concat_axis);
      inputs["interleave"].add_arguments()->set_name(concat_interleave);
      setTensorOutput2D(op, output, channels * 3);
    }
  }
}

// Emits KataGo's value-head global pooling over the board mask.
// KataGo value head global pooling produces: [mean, mean_scaled, mean_f3]
// mean_scaled = mean * (sqrt(count) - 14) * 0.1
// mean_f3 = mean * ((sqrt(count) - 14)^2 * 0.01 - 0.1)
void MILBuilder::addGlobalPoolingValueOps(CoreML::Specification::MILSpec::Block* block,
                                          const std::string& input,
                                          const std::string& mask,
                                          int channels,
                                          const std::string& output) {
  if (m_optimize_identity_mask) {
    // Optimized path: use precomputed constants
    const auto& mc = m_ops.getMaskConstants();

    // Mean pooling: sum / count -> [1, C, 1, 1]
    std::string sum_name = output + "_sum";
    std::string sum_axes = sum_name + "_axes_0";
    std::string sum_keep_dims = sum_name + "_keep_dims_0";
    addIntArrayConstOp(block, sum_axes, {2, 3});
    addBoolScalarConstOp(block, sum_keep_dims, true);
    {
      auto* op = block->add_operations();
      op->set_type("reduce_sum");
      auto& inputs = *op->mutable_inputs();
      inputs["x"].add_arguments()->set_name(input);
      inputs["axes"].add_arguments()->set_name(sum_axes);
      inputs["keep_dims"].add_arguments()->set_name(sum_keep_dims);
      setTensorOutputPooled4D(op, sum_name, channels);
    }

    std::string mean_name = output + "_mean";
    std::string mean_y = mean_name + "_y_0";
+ addFloatScalarConstOp(block, mean_y, mc.mask_sum_reciprocal); + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sum_name); + inputs["y"].add_arguments()->set_name(mean_y); + setTensorOutputPooled4D(op, mean_name, channels); + } + + // Mean scaled = mean * constant -> [1, C, 1, 1] + std::string mean_scaled_name = output + "_mean_scaled"; + std::string mean_scaled_y = mean_scaled_name + "_y_0"; + addFloatScalarConstOp(block, mean_scaled_y, mc.mask_sum_sqrt_s14_m01); + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["y"].add_arguments()->set_name(mean_scaled_y); + setTensorOutputPooled4D(op, mean_scaled_name, channels); + } + + // Mean feature 3 = mean * constant -> [1, C, 1, 1] + std::string mean_f3_name = output + "_mean_f3"; + std::string mean_f3_y = mean_f3_name + "_y_0"; + addFloatScalarConstOp(block, mean_f3_y, mc.mask_sum_sqrt_s14_m01_sq_s01); + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["y"].add_arguments()->set_name(mean_f3_y); + setTensorOutputPooled4D(op, mean_f3_name, channels); + } + + // Squeeze spatial dimensions: [N, C, 1, 1] -> [N, C] + std::string mean_flat = output + "_mean_flat"; + std::string mean_flat_axes = mean_flat + "_axes_0"; + addIntArrayConstOp(block, mean_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["axes"].add_arguments()->set_name(mean_flat_axes); + setTensorOutput2D(op, mean_flat, channels); + } + + std::string mean_scaled_flat = output + "_mean_scaled_flat"; + std::string mean_scaled_flat_axes = mean_scaled_flat + "_axes_0"; + addIntArrayConstOp(block, 
mean_scaled_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_scaled_name); + inputs["axes"].add_arguments()->set_name(mean_scaled_flat_axes); + setTensorOutput2D(op, mean_scaled_flat, channels); + } + + std::string mean_f3_flat = output + "_mean_f3_flat"; + std::string mean_f3_flat_axes = mean_f3_flat + "_axes_0"; + addIntArrayConstOp(block, mean_f3_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_f3_name); + inputs["axes"].add_arguments()->set_name(mean_f3_flat_axes); + setTensorOutput2D(op, mean_f3_flat, channels); + } + + // Concatenate: [mean, mean_scaled, mean_f3] -> [1, 3*C] + std::string concat_axis = output + "_concat_axis_0"; + std::string concat_interleave = output + "_concat_interleave_0"; + addIntScalarConstOp(block, concat_axis, 1); + addBoolScalarConstOp(block, concat_interleave, false); + { + auto* op = block->add_operations(); + op->set_type("concat"); + auto& inputs = *op->mutable_inputs(); + inputs["values"].add_arguments()->set_name(mean_flat); + inputs["values"].add_arguments()->set_name(mean_scaled_flat); + inputs["values"].add_arguments()->set_name(mean_f3_flat); + inputs["axis"].add_arguments()->set_name(concat_axis); + inputs["interleave"].add_arguments()->set_name(concat_interleave); + setTensorOutput2D(op, output, channels * 3); + } + } else { + // Full path with mask operations + // Count valid positions: [1, 1, H, W] -> [1, 1, 1, 1] + std::string mask_sum_name = output + "_mask_sum"; + std::string mask_sum_axes = mask_sum_name + "_axes_0"; + std::string mask_sum_keep_dims = mask_sum_name + "_keep_dims_0"; + addIntArrayConstOp(block, mask_sum_axes, {2, 3}); + addBoolScalarConstOp(block, mask_sum_keep_dims, true); + { + auto* op = block->add_operations(); + op->set_type("reduce_sum"); + auto& 
inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mask); + inputs["axes"].add_arguments()->set_name(mask_sum_axes); + inputs["keep_dims"].add_arguments()->set_name(mask_sum_keep_dims); + setTensorOutputMask4D(op, mask_sum_name); + } + + // Masked input: [1, C, H, W] * [1, 1, H, W] -> [1, C, H, W] + std::string masked_name = output + "_masked"; + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(input); + inputs["y"].add_arguments()->set_name(mask); + setTensorOutput4D(op, masked_name, channels, m_board_y_size, m_board_x_size); + } + + // Sum masked values: [1, C, H, W] -> [1, C, 1, 1] + std::string sum_name = output + "_sum"; + std::string sum_axes = sum_name + "_axes_0"; + std::string sum_keep_dims = sum_name + "_keep_dims_0"; + addIntArrayConstOp(block, sum_axes, {2, 3}); + addBoolScalarConstOp(block, sum_keep_dims, true); + { + auto* op = block->add_operations(); + op->set_type("reduce_sum"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(masked_name); + inputs["axes"].add_arguments()->set_name(sum_axes); + inputs["keep_dims"].add_arguments()->set_name(sum_keep_dims); + setTensorOutputPooled4D(op, sum_name, channels); + } + + // Mean = sum / count: [1, C, 1, 1] / [1, 1, 1, 1] -> [1, C, 1, 1] + std::string mean_name = output + "_mean"; + { + auto* op = block->add_operations(); + op->set_type("real_div"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sum_name); + inputs["y"].add_arguments()->set_name(mask_sum_name); + setTensorOutputPooled4D(op, mean_name, channels); + } + + // Compute (sqrt(count) - 14): [1, 1, 1, 1] -> [1, 1, 1, 1] + std::string sqrt_mask_sum = output + "_sqrt_mask_sum"; + { + auto* op = block->add_operations(); + op->set_type("sqrt"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mask_sum_name); + setTensorOutputMask4D(op, 
sqrt_mask_sum); + } + + std::string sqrt_m14 = output + "_sqrt_m14"; + std::string sqrt_m14_y = sqrt_m14 + "_y_0"; + addFloatScalarConstOp(block, sqrt_m14_y, 14.0f); + { + auto* op = block->add_operations(); + op->set_type("sub"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sqrt_mask_sum); + inputs["y"].add_arguments()->set_name(sqrt_m14_y); + setTensorOutputMask4D(op, sqrt_m14); + } + + // Feature 2: Mean * (sqrt(count) - 14) * 0.1 + // scaled_factor: [1, 1, 1, 1] * scalar -> [1, 1, 1, 1] + std::string scaled_factor = output + "_scaled_factor"; + std::string scaled_factor_y = scaled_factor + "_y_0"; + addFloatScalarConstOp(block, scaled_factor_y, 0.1f); + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sqrt_m14); + inputs["y"].add_arguments()->set_name(scaled_factor_y); + setTensorOutputMask4D(op, scaled_factor); + } + + // mean_scaled: [1, C, 1, 1] * [1, 1, 1, 1] -> [1, C, 1, 1] + std::string mean_scaled = output + "_mean_scaled"; + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["y"].add_arguments()->set_name(scaled_factor); + setTensorOutputPooled4D(op, mean_scaled, channels); + } + + // Feature 3: Mean * ((sqrt(count) - 14)^2 * 0.01 - 0.1) + // sqrt_m14_sq: [1, 1, 1, 1] * [1, 1, 1, 1] -> [1, 1, 1, 1] + std::string sqrt_m14_sq = output + "_sqrt_m14_sq"; + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sqrt_m14); + inputs["y"].add_arguments()->set_name(sqrt_m14); + setTensorOutputMask4D(op, sqrt_m14_sq); + } + + // sq_01: [1, 1, 1, 1] * scalar -> [1, 1, 1, 1] + std::string sq_01 = output + "_sq_01"; + std::string sq_01_y = sq_01 + "_y_0"; + addFloatScalarConstOp(block, sq_01_y, 0.01f); + { + auto* op = 
block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sqrt_m14_sq); + inputs["y"].add_arguments()->set_name(sq_01_y); + setTensorOutputMask4D(op, sq_01); + } + + // f3_factor: [1, 1, 1, 1] - scalar -> [1, 1, 1, 1] + std::string f3_factor = output + "_f3_factor"; + std::string f3_factor_y = f3_factor + "_y_0"; + addFloatScalarConstOp(block, f3_factor_y, 0.1f); + { + auto* op = block->add_operations(); + op->set_type("sub"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(sq_01); + inputs["y"].add_arguments()->set_name(f3_factor_y); + setTensorOutputMask4D(op, f3_factor); + } + + // mean_f3: [1, C, 1, 1] * [1, 1, 1, 1] -> [1, C, 1, 1] + std::string mean_f3 = output + "_mean_f3"; + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["y"].add_arguments()->set_name(f3_factor); + setTensorOutputPooled4D(op, mean_f3, channels); + } + + // Squeeze spatial dimensions: [1, C, 1, 1] -> [1, C] + std::string mean_flat = output + "_mean_flat"; + std::string mean_flat_axes = mean_flat + "_axes_0"; + addIntArrayConstOp(block, mean_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_name); + inputs["axes"].add_arguments()->set_name(mean_flat_axes); + setTensorOutput2D(op, mean_flat, channels); + } + + std::string mean_scaled_flat = output + "_mean_scaled_flat"; + std::string mean_scaled_flat_axes = mean_scaled_flat + "_axes_0"; + addIntArrayConstOp(block, mean_scaled_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_scaled); + inputs["axes"].add_arguments()->set_name(mean_scaled_flat_axes); + setTensorOutput2D(op, 
mean_scaled_flat, channels); + } + + std::string mean_f3_flat = output + "_mean_f3_flat"; + std::string mean_f3_flat_axes = mean_f3_flat + "_axes_0"; + addIntArrayConstOp(block, mean_f3_flat_axes, {2, 3}); + { + auto* op = block->add_operations(); + op->set_type("squeeze"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(mean_f3); + inputs["axes"].add_arguments()->set_name(mean_f3_flat_axes); + setTensorOutput2D(op, mean_f3_flat, channels); + } + + // Concatenate: [mean, mean_scaled, mean_f3] -> [1, 3*C] + std::string concat_axis = output + "_concat_axis_0"; + std::string concat_interleave = output + "_concat_interleave_0"; + addIntScalarConstOp(block, concat_axis, 1); + addBoolScalarConstOp(block, concat_interleave, false); + { + auto* op = block->add_operations(); + op->set_type("concat"); + auto& inputs = *op->mutable_inputs(); + inputs["values"].add_arguments()->set_name(mean_flat); + inputs["values"].add_arguments()->set_name(mean_scaled_flat); + inputs["values"].add_arguments()->set_name(mean_f3_flat); + inputs["axis"].add_arguments()->set_name(concat_axis); + inputs["interleave"].add_arguments()->set_name(concat_interleave); + setTensorOutput2D(op, output, channels * 3); + } + } +} + +// ============================================================================ +// Network Component Builders +// ============================================================================ + +std::string MILBuilder::buildTrunk(CoreML::Specification::MILSpec::Block* block, + const std::string& spatial_input, + const std::string& global_input, + const std::string& mask, + const std::string* meta_input) { + const auto& trunk = m_model.trunk; + + // Initial conv + std::string x = genVarName("trunk_init_conv"); + addConvOp(block, spatial_input, trunk.initial_conv, x); + + // Global projection + std::string global_bias = genVarName("trunk_global_proj"); + addMatMulOp(block, global_input, trunk.initial_matmul, global_bias); + + // Reshape global bias 
to [batch, C, 1, 1] + // Create shape const first (matching Python structure) + std::string global_bias_reshaped = genVarName("trunk_global_reshape"); + std::string reshape_shape_name = global_bias_reshaped + "_shape_0"; + // Use -1 for batch to infer from input, explicit channel count + addIntArrayConstOp(block, reshape_shape_name, {-1, static_cast(trunk.initial_conv.out_channels), 1, 1}); + { + auto* op = block->add_operations(); + op->set_type("reshape"); + // "name" attribute + auto& name_attr = (*op->mutable_attributes())["name"]; + name_attr.mutable_type()->mutable_tensortype()->set_datatype( + CoreML::Specification::MILSpec::DataType::STRING); + name_attr.mutable_immediatevalue()->mutable_tensor()->mutable_strings()->add_values(global_bias_reshaped); + // Inputs + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(global_bias); + inputs["shape"].add_arguments()->set_name(reshape_shape_name); + // Output with dimensions [batch, C, 1, 1] + setTensorOutputPooled4D(op, global_bias_reshaped, trunk.initial_conv.out_channels); + } + + // Add global bias + std::string x_with_global = genVarName("trunk_add_global"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(x); + inputs["y"].add_arguments()->set_name(global_bias_reshaped); + // Output with 4D shape [batch, C, H, W] + setTensorOutput4D(op, x_with_global, trunk.trunk_num_channels, m_board_y_size, m_board_x_size); + } + x = x_with_global; + + // Add metadata bias if present + if (trunk.sgf_metadata_encoder.has_value() && meta_input != nullptr) { + std::string meta_bias = buildSGFMetadataEncoder(block, *meta_input, *trunk.sgf_metadata_encoder); + + // Reshape meta bias + std::string meta_bias_reshaped = genVarName("trunk_meta_reshape"); + std::string meta_bias_shape_name = meta_bias_reshaped + "_shape_0"; + // Use -1 for batch to infer from input, explicit channel count + 
addIntArrayConstOp(block, meta_bias_shape_name, {-1, static_cast(trunk.trunk_num_channels), 1, 1}); + { + auto* op = block->add_operations(); + op->set_type("reshape"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(meta_bias); + inputs["shape"].add_arguments()->set_name(meta_bias_shape_name); + // Output with 4D shape [batch, C, 1, 1] + setTensorOutputPooled4D(op, meta_bias_reshaped, trunk.trunk_num_channels); + } + + // Add meta bias + std::string x_with_meta = genVarName("trunk_add_meta"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(x); + inputs["y"].add_arguments()->set_name(meta_bias_reshaped); + // Output with 4D shape [batch, C, H, W] + setTensorOutput4D(op, x_with_meta, trunk.trunk_num_channels, m_board_y_size, m_board_x_size); + } + x = x_with_meta; + } + + // Apply initial mask + std::string x_masked = genVarName("trunk_init_mask"); + { + auto* op = block->add_operations(); + op->set_type("mul"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(x); + inputs["y"].add_arguments()->set_name(mask); + // Output with 4D shape [batch, C, H, W] + setTensorOutput4D(op, x_masked, trunk.trunk_num_channels, m_board_y_size, m_board_x_size); + } + x = x_masked; + + // Process residual blocks + for (size_t i = 0; i < trunk.blocks.size(); i++) { + const auto& entry = trunk.blocks[i]; + std::string prefix = "trunk_block_" + std::to_string(i); + + if (entry.block_kind == ORDINARY_BLOCK_KIND) { + const auto& block_desc = std::get(*entry.block); + x = buildResidualBlock(block, x, block_desc, mask, prefix); + } else if (entry.block_kind == GLOBAL_POOLING_BLOCK_KIND) { + const auto& block_desc = std::get(*entry.block); + x = buildGlobalPoolingResidualBlock(block, x, block_desc, mask, prefix); + } else if (entry.block_kind == NESTED_BOTTLENECK_BLOCK_KIND) { + const auto& block_desc = std::get(*entry.block); + x = 
buildNestedBottleneckBlock(block, x, block_desc, mask, prefix); + } + } + + // Trunk tip + std::string trunk_out = genVarName("trunk_tip"); + addBatchNormActivationOps(block, x, trunk.trunk_tip_bn, trunk.trunk_tip_activation, mask, trunk_out); + + return trunk_out; +} + +std::string MILBuilder::buildResidualBlock(CoreML::Specification::MILSpec::Block* block, + const std::string& input, + const ResidualBlockDesc& block_desc, + const std::string& mask, + const std::string& prefix) { + // Pre BN + activation + std::string pre_out = genVarName(prefix + "_pre"); + addBatchNormActivationOps(block, input, block_desc.pre_bn, block_desc.pre_activation, mask, pre_out); + + // First conv + std::string conv1_out = genVarName(prefix + "_conv1"); + addConvOp(block, pre_out, block_desc.regular_conv, conv1_out); + + // Mid BN + activation + std::string mid_out = genVarName(prefix + "_mid"); + addBatchNormActivationOps(block, conv1_out, block_desc.mid_bn, block_desc.mid_activation, mask, mid_out); + + // Second conv + std::string conv2_out = genVarName(prefix + "_conv2"); + addConvOp(block, mid_out, block_desc.final_conv, conv2_out); + + // Residual add + std::string output = genVarName(prefix + "_residual"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(conv2_out); + inputs["y"].add_arguments()->set_name(input); + // Set proper 4D output type [1, C, H, W] + setTensorOutput4D(op, output, block_desc.final_conv.out_channels, m_board_y_size, m_board_x_size); + } + + return output; +} + +std::string MILBuilder::buildGlobalPoolingResidualBlock(CoreML::Specification::MILSpec::Block* block, + const std::string& input, + const GlobalPoolingResidualBlockDesc& block_desc, + const std::string& mask, + const std::string& prefix) { + // Pre BN + activation + std::string pre_out = genVarName(prefix + "_pre"); + addBatchNormActivationOps(block, input, block_desc.pre_bn, block_desc.pre_activation, 
mask, pre_out); + + // Regular conv + std::string regular_out = genVarName(prefix + "_regular"); + addConvOp(block, pre_out, block_desc.regular_conv, regular_out); + + // Gpool conv + std::string gpool_conv_out = genVarName(prefix + "_gpool_conv"); + addConvOp(block, pre_out, block_desc.gpool_conv, gpool_conv_out); + + // Gpool BN + activation + std::string gpool_bn_out = genVarName(prefix + "_gpool_bn"); + addBatchNormActivationOps(block, gpool_conv_out, block_desc.gpool_bn, block_desc.gpool_activation, mask, gpool_bn_out); + + // Global pooling + std::string gpool_features = genVarName(prefix + "_gpool_features"); + addGlobalPoolingOps(block, gpool_bn_out, mask, block_desc.gpool_conv.out_channels, gpool_features); + + // Project to bias + std::string gpool_bias = genVarName(prefix + "_gpool_bias"); + addMatMulOp(block, gpool_features, block_desc.gpool_to_bias_mul, gpool_bias); + + // Reshape bias + std::string gpool_bias_reshaped = genVarName(prefix + "_gpool_bias_reshape"); + std::string gpool_bias_reshape_shape = gpool_bias_reshaped + "_shape_0"; + // Use -1 for batch to infer from input, explicit channel count + addIntArrayConstOp(block, gpool_bias_reshape_shape, {-1, static_cast(block_desc.regular_conv.out_channels), 1, 1}); + { + auto* op = block->add_operations(); + op->set_type("reshape"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(gpool_bias); + inputs["shape"].add_arguments()->set_name(gpool_bias_reshape_shape); + // Output is [batch, regular_conv.out_channels, 1, 1] + setTensorOutputPooled4D(op, gpool_bias_reshaped, block_desc.regular_conv.out_channels); + } + + // Add bias to regular path + std::string combined = genVarName(prefix + "_combined"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(regular_out); + inputs["y"].add_arguments()->set_name(gpool_bias_reshaped); + // Output is [1, regular_conv.out_channels, H, W] + 
setTensorOutput4D(op, combined, block_desc.regular_conv.out_channels, m_board_y_size, m_board_x_size); + } + + // Mid BN + activation + std::string mid_out = genVarName(prefix + "_mid"); + addBatchNormActivationOps(block, combined, block_desc.mid_bn, block_desc.mid_activation, mask, mid_out); + + // Final conv + std::string final_conv_out = genVarName(prefix + "_final"); + addConvOp(block, mid_out, block_desc.final_conv, final_conv_out); + + // Residual add + std::string output = genVarName(prefix + "_residual"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(final_conv_out); + inputs["y"].add_arguments()->set_name(input); + // Set proper 4D output type [1, C, H, W] + setTensorOutput4D(op, output, block_desc.final_conv.out_channels, m_board_y_size, m_board_x_size); + } + + return output; +} + +std::string MILBuilder::buildNestedBottleneckBlock(CoreML::Specification::MILSpec::Block* block, + const std::string& input, + const NestedBottleneckResidualBlockDesc& block_desc, + const std::string& mask, + const std::string& prefix) { + // Pre BN + activation + std::string pre_out = genVarName(prefix + "_pre"); + addBatchNormActivationOps(block, input, block_desc.pre_bn, block_desc.pre_activation, mask, pre_out); + + // Pre conv (bottleneck reduction) + std::string pre_conv_out = genVarName(prefix + "_pre_conv"); + addConvOp(block, pre_out, block_desc.pre_conv, pre_conv_out); + + std::string x = pre_conv_out; + + // Process nested blocks + for (size_t i = 0; i < block_desc.blocks.size(); i++) { + const auto& entry = block_desc.blocks[i]; + std::string nested_prefix = prefix + "_nested_" + std::to_string(i); + + if (entry.block_kind == ORDINARY_BLOCK_KIND) { + const auto& nested = std::get(*entry.block); + x = buildResidualBlock(block, x, nested, mask, nested_prefix); + } else if (entry.block_kind == GLOBAL_POOLING_BLOCK_KIND) { + const auto& nested = std::get(*entry.block); + 
x = buildGlobalPoolingResidualBlock(block, x, nested, mask, nested_prefix); + } + } + + // Post BN + activation + std::string post_out = genVarName(prefix + "_post"); + addBatchNormActivationOps(block, x, block_desc.post_bn, block_desc.post_activation, mask, post_out); + + // Post conv (bottleneck expansion) + std::string post_conv_out = genVarName(prefix + "_post_conv"); + addConvOp(block, post_out, block_desc.post_conv, post_conv_out); + + // Residual add + std::string output = genVarName(prefix + "_residual"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(post_conv_out); + inputs["y"].add_arguments()->set_name(input); + // Set proper 4D output type [1, C, H, W] + setTensorOutput4D(op, output, block_desc.post_conv.out_channels, m_board_y_size, m_board_x_size); + } + + return output; +} + +void MILBuilder::buildPolicyHead(CoreML::Specification::MILSpec::Block* block, + const std::string& trunk_out, + const std::string& mask, + std::string& policy_out, + std::string& pass_out) { + const auto& ph = m_model.policy_head; + + // P1 conv + std::string p1 = genVarName("policy_p1"); + addConvOp(block, trunk_out, ph.p1_conv, p1); + + // G1 conv + BN + activation + std::string g1_conv = genVarName("policy_g1_conv"); + addConvOp(block, trunk_out, ph.g1_conv, g1_conv); + + std::string g1 = genVarName("policy_g1"); + addBatchNormActivationOps(block, g1_conv, ph.g1_bn, ph.g1_activation, mask, g1); + + // Global pooling on G1 + std::string g1_pooled = genVarName("policy_g1_pool"); + addGlobalPoolingOps(block, g1, mask, ph.g1_conv.out_channels, g1_pooled); + + // Project to spatial bias + std::string gpool_bias = genVarName("policy_gpool_bias"); + addMatMulOp(block, g1_pooled, ph.gpool_to_bias_mul, gpool_bias); + + // Reshape bias + std::string gpool_bias_reshaped = genVarName("policy_gpool_bias_reshape"); + std::string policy_gpool_reshape_shape = gpool_bias_reshaped + "_shape_0"; 
+ // Use -1 for batch to infer from input, explicit channel count + addIntArrayConstOp(block, policy_gpool_reshape_shape, {-1, static_cast(ph.p1_conv.out_channels), 1, 1}); + { + auto* op = block->add_operations(); + op->set_type("reshape"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(gpool_bias); + inputs["shape"].add_arguments()->set_name(policy_gpool_reshape_shape); + // Output is [batch, p1_conv.out_channels, 1, 1] + setTensorOutputPooled4D(op, gpool_bias_reshaped, ph.p1_conv.out_channels); + } + + // Add bias to P1 + std::string p1_biased = genVarName("policy_p1_biased"); + { + auto* op = block->add_operations(); + op->set_type("add"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(p1); + inputs["y"].add_arguments()->set_name(gpool_bias_reshaped); + // Output is [1, p1_conv.out_channels, H, W] + setTensorOutput4D(op, p1_biased, ph.p1_conv.out_channels, m_board_y_size, m_board_x_size); + } + + // P1 BN + activation + std::string p1_activated = genVarName("policy_p1_act"); + addBatchNormActivationOps(block, p1_biased, ph.p1_bn, ph.p1_activation, mask, p1_activated); + + // P2 conv -> policy output + // Mixed precision uses _fp16 suffix for this intermediate op; cast ops later rename to base name + policy_out = (m_use_fp16 && !m_use_fp16_io) ? 
"policy_p2_conv_fp16" : "policy_p2_conv"; + addConvOp(block, p1_activated, ph.p2_conv, policy_out); + + // Pass move + if (ph.gpool_to_pass_mul2.has_value()) { + // v15+: two-layer pass (first layer fused matmul+bias -> linear) + std::string pass_biased = genVarName("policy_pass_biased"); + addLinearOp(block, g1_pooled, ph.gpool_to_pass_mul, *ph.gpool_to_pass_bias, pass_biased); + + // Activation + std::string pass_activated = genVarName("policy_pass_act"); + if (ph.pass_activation->activation_type == ActivationType::ReLU) { + auto* op = block->add_operations(); + op->set_type("relu"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(pass_biased); + setTensorOutput2D(op, pass_activated, ph.gpool_to_pass_mul.out_channels); + } else if (ph.pass_activation->activation_type == ActivationType::Mish) { + addMishOps(block, pass_biased, pass_activated, 2, ph.gpool_to_pass_mul.out_channels); + } else { + pass_activated = pass_biased; + } + + // Mixed precision: _fp16 intermediate, cast ops rename to base name + pass_out = (m_use_fp16 && !m_use_fp16_io) ? "policy_pass_fp16" : "policy_pass"; + addMatMulOp(block, pass_activated, *ph.gpool_to_pass_mul2, pass_out); + } else { + // Pre-v15: single layer pass + // Mixed precision: _fp16 intermediate, cast ops rename to base name (pre-v15) + pass_out = (m_use_fp16 && !m_use_fp16_io) ? 
"policy_pass_fp16" : "policy_pass"; + addMatMulOp(block, g1_pooled, ph.gpool_to_pass_mul, pass_out); + } +} + +void MILBuilder::buildValueHead(CoreML::Specification::MILSpec::Block* block, + const std::string& trunk_out, + const std::string& mask, + std::string& value_out, + std::string& ownership_out, + std::string& score_value_out) { + const auto& vh = m_model.value_head; + + // V1 conv + BN + activation + std::string v1_conv = genVarName("value_v1_conv"); + addConvOp(block, trunk_out, vh.v1_conv, v1_conv); + + std::string v1 = genVarName("value_v1"); + addBatchNormActivationOps(block, v1_conv, vh.v1_bn, vh.v1_activation, mask, v1); + + // Global pooling (value head version) + std::string v1_pooled = genVarName("value_v1_pool"); + addGlobalPoolingValueOps(block, v1, mask, vh.v1_conv.out_channels, v1_pooled); + + // V2: linear + activation (fused matmul+bias -> linear) + std::string v2_bias = genVarName("value_v2_bias"); + addLinearOp(block, v1_pooled, vh.v2_mul, vh.v2_bias, v2_bias); + + std::string v2 = genVarName("value_v2"); + if (vh.v2_activation.activation_type == ActivationType::ReLU) { + auto* op = block->add_operations(); + op->set_type("relu"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(v2_bias); + setTensorOutput2D(op, v2, vh.v2_mul.out_channels); + } else if (vh.v2_activation.activation_type == ActivationType::Mish) { + addMishOps(block, v2_bias, v2, 2, vh.v2_mul.out_channels); + } else { + v2 = v2_bias; + } + + // V3: linear -> value output (fused matmul+bias -> linear) + // Mixed precision: _fp16 intermediate, cast ops rename to base name + value_out = (m_use_fp16 && !m_use_fp16_io) ? "value_v3_bias_fp16" : "value_v3_bias"; + addLinearOp(block, v2, vh.v3_mul, vh.v3_bias, value_out); + + // SV3: linear -> score value output (fused matmul+bias -> linear) + // Mixed precision: _fp16 intermediate, cast ops rename to base name + score_value_out = (m_use_fp16 && !m_use_fp16_io) ? 
"value_sv3_bias_fp16" : "value_sv3_bias"; + addLinearOp(block, v2, vh.sv3_mul, vh.sv3_bias, score_value_out); + + // Ownership conv + // Mixed precision: _fp16 intermediate, cast ops rename to base name + ownership_out = (m_use_fp16 && !m_use_fp16_io) ? "value_ownership_conv_fp16" : "value_ownership_conv"; + addConvOp(block, v1, vh.v_ownership_conv, ownership_out); +} + +std::string MILBuilder::buildSGFMetadataEncoder(CoreML::Specification::MILSpec::Block* block, + const std::string& meta_input, + const SGFMetadataEncoderDesc& encoder) { + // Layer 1 (fused matmul+bias -> linear) + std::string bias1 = genVarName("meta_bias1"); + addLinearOp(block, meta_input, encoder.mul1, encoder.bias1, bias1); + + std::string act1 = genVarName("meta_act1"); + if (encoder.act1.activation_type == ActivationType::ReLU) { + auto* op = block->add_operations(); + op->set_type("relu"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(bias1); + setTensorOutput2D(op, act1, encoder.mul1.out_channels); + } else if (encoder.act1.activation_type == ActivationType::Mish) { + addMishOps(block, bias1, act1, 2, encoder.mul1.out_channels); + } else { + // Identity activation - create identity op to preserve type information + auto* op = block->add_operations(); + op->set_type("identity"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(bias1); + setTensorOutput2D(op, act1, encoder.mul1.out_channels); + } + + // Layer 2 (fused matmul+bias -> linear) + std::string bias2 = genVarName("meta_bias2"); + addLinearOp(block, act1, encoder.mul2, encoder.bias2, bias2); + + std::string act2 = genVarName("meta_act2"); + if (encoder.act2.activation_type == ActivationType::ReLU) { + auto* op = block->add_operations(); + op->set_type("relu"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(bias2); + setTensorOutput2D(op, act2, encoder.mul2.out_channels); + } else if (encoder.act2.activation_type == ActivationType::Mish) 
{ + addMishOps(block, bias2, act2, 2, encoder.mul2.out_channels); + } else { + // Identity activation - create identity op to preserve type information + auto* op = block->add_operations(); + op->set_type("identity"); + auto& inputs = *op->mutable_inputs(); + inputs["x"].add_arguments()->set_name(bias2); + setTensorOutput2D(op, act2, encoder.mul2.out_channels); + } + + // Layer 3 (output) + std::string output = genVarName("meta_output"); + addMatMulOp(block, act2, encoder.mul3, output); + + return output; +} + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/builder/MILBuilder.hpp b/cpp/external/katagocoreml/src/builder/MILBuilder.hpp new file mode 100644 index 000000000..042f9fc16 --- /dev/null +++ b/cpp/external/katagocoreml/src/builder/MILBuilder.hpp @@ -0,0 +1,194 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include "../types/KataGoTypes.hpp" +#include "Operations.hpp" +#include "MIL.pb.h" +#include +#include +#include + +namespace katagocoreml { + +/// Builder for constructing MIL programs from KataGo models. +/// Converts a parsed KataGo model description into a MIL protobuf program. 
+class MILBuilder {
+public:
+  MILBuilder(const KataGoModelDesc& model,
+             int board_x_size,
+             int board_y_size,
+             bool optimize_identity_mask,
+             bool use_fp16 = false,
+             int min_batch_size = 1,
+             int max_batch_size = 1,
+             bool use_fp16_io = false);
+
+  /// Build and return the MIL program protobuf
+  /// @return Unique pointer to MIL Program protobuf
+  std::unique_ptr<CoreML::Specification::MILSpec::Program> build(); // NOTE(review): template arg was lost in transit; reconstructed from the doc comment — verify
+
+  /// Get weight entries for blob serialization
+  const std::vector<WeightEntry>& getWeights() const { return m_ops.getWeights(); }
+
+  /// Get board dimensions
+  int getBoardXSize() const { return m_board_x_size; }
+  int getBoardYSize() const { return m_board_y_size; }
+
+private:
+  const KataGoModelDesc& m_model;
+  int m_board_x_size;
+  int m_board_y_size;
+  bool m_optimize_identity_mask;
+  bool m_use_fp16;
+  bool m_use_fp16_io;
+  int m_min_batch_size;
+  int m_max_batch_size;
+  CoreML::Specification::MILSpec::DataType m_weight_dtype;
+  KataGoOps m_ops;
+
+  // Batch size helpers
+  bool isDynamicBatch() const {
+    return m_min_batch_size != m_max_batch_size || m_max_batch_size <= 0;
+  }
+  void setBatchDimension(CoreML::Specification::MILSpec::TensorType* tensor_type);
+
+  // Tensor output helpers with batch dimension support
+  void setTensorOutput4D(CoreML::Specification::MILSpec::Operation* op,
+                         const std::string& name,
+                         int channels, int height, int width);
+  void setTensorOutput2D(CoreML::Specification::MILSpec::Operation* op,
+                         const std::string& name,
+                         int channels);
+  void setTensorOutputPooled4D(CoreML::Specification::MILSpec::Operation* op,
+                               const std::string& name,
+                               int channels);
+  void setTensorOutputMask4D(CoreML::Specification::MILSpec::Operation* op,
+                             const std::string& name);
+  void setTensorOutputMaskSpatial4D(CoreML::Specification::MILSpec::Operation* op,
+                                    const std::string& name,
+                                    int height, int width);
+
+  // Operation name counter for unique names
+  int m_var_counter = 0;
+  std::string genVarName(const std::string& prefix);
+
+  // MIL program construction helpers
+  // NOTE(review): the element types of the std::vector parameters below were lost
+  // in transit; float/int64_t/int32_t reconstructed from WeightEntry and the
+  // static_cast call sites in MILBuilder.cpp — verify against the original sources.
+  void addConstOp(CoreML::Specification::MILSpec::Block* block,
+                  const std::string& name,
+                  const std::vector<float>& data,
+                  const std::vector<int64_t>& shape);
+
+  void addIntArrayConstOp(CoreML::Specification::MILSpec::Block* block,
+                          const std::string& name,
+                          const std::vector<int32_t>& values);
+
+  void addBoolScalarConstOp(CoreML::Specification::MILSpec::Block* block,
+                            const std::string& name,
+                            bool value);
+
+  void addFloatScalarConstOp(CoreML::Specification::MILSpec::Block* block,
+                             const std::string& name,
+                             float value);
+
+  void addIntScalarConstOp(CoreML::Specification::MILSpec::Block* block,
+                           const std::string& name,
+                           int32_t value);
+
+  void addCastOp(CoreML::Specification::MILSpec::Block* block,
+                 const std::string& input,
+                 const std::string& output,
+                 const std::string& dtype,
+                 const std::vector<int64_t>& shape);
+
+  void addConvOp(CoreML::Specification::MILSpec::Block* block,
+                 const std::string& input,
+                 const ConvLayerDesc& layer,
+                 const std::string& output);
+
+  void addBatchNormActivationOps(CoreML::Specification::MILSpec::Block* block,
+                                 const std::string& input,
+                                 const BatchNormLayerDesc& bn,
+                                 const ActivationLayerDesc& act,
+                                 const std::string& mask,
+                                 const std::string& output);
+
+  void addMishOps(CoreML::Specification::MILSpec::Block* block,
+                  const std::string& input,
+                  const std::string& output,
+                  int rank,
+                  int channels);
+
+  void addGlobalPoolingOps(CoreML::Specification::MILSpec::Block* block,
+                           const std::string& input,
+                           const std::string& mask,
+                           int channels,
+                           const std::string& output);
+
+  void addGlobalPoolingValueOps(CoreML::Specification::MILSpec::Block* block,
+                                const std::string& input,
+                                const std::string& mask,
+                                int channels,
+                                const std::string& output);
+
+  void addMatMulOp(CoreML::Specification::MILSpec::Block* block,
+                   const std::string& input,
+                   const MatMulLayerDesc& layer,
+                   const std::string& output);
+
+  void addMatBiasOp(CoreML::Specification::MILSpec::Block* block,
+                    const std::string& input,
+                    const MatBiasLayerDesc& layer,
+                    const std::string& output);
+
+  void addLinearOp(CoreML::Specification::MILSpec::Block* block,
+                   const std::string& input,
+                   const MatMulLayerDesc& matmul,
+                   const MatBiasLayerDesc& bias,
+                   const std::string& output);
+
+  // Network component builders
+  std::string buildTrunk(CoreML::Specification::MILSpec::Block* block,
+                         const std::string& spatial_input,
+                         const std::string& global_input,
+                         const std::string& mask,
+                         const std::string* meta_input);
+
+  std::string buildResidualBlock(CoreML::Specification::MILSpec::Block* block,
+                                 const std::string& input,
+                                 const ResidualBlockDesc& block_desc,
+                                 const std::string& mask,
+                                 const std::string& prefix);
+
+  std::string buildGlobalPoolingResidualBlock(CoreML::Specification::MILSpec::Block* block,
+                                              const std::string& input,
+                                              const GlobalPoolingResidualBlockDesc& block_desc,
+                                              const std::string& mask,
+                                              const std::string& prefix);
+
+  std::string buildNestedBottleneckBlock(CoreML::Specification::MILSpec::Block* block,
+                                         const std::string& input,
+                                         const NestedBottleneckResidualBlockDesc& block_desc,
+                                         const std::string& mask,
+                                         const std::string& prefix);
+
+  void buildPolicyHead(CoreML::Specification::MILSpec::Block* block,
+                       const std::string& trunk_out,
+                       const std::string& mask,
+                       std::string& policy_out,
+                       std::string& pass_out);
+
+  void buildValueHead(CoreML::Specification::MILSpec::Block* block,
+                      const std::string& trunk_out,
+                      const std::string& mask,
+                      std::string& value_out,
+                      std::string& ownership_out,
+                      std::string& score_value_out);
+
+  std::string buildSGFMetadataEncoder(CoreML::Specification::MILSpec::Block* block,
+                                      const std::string& meta_input,
+                                      const SGFMetadataEncoderDesc& encoder);
+};
+
+} // namespace katagocoreml
diff --git a/cpp/external/katagocoreml/src/builder/Operations.cpp b/cpp/external/katagocoreml/src/builder/Operations.cpp
new file mode 100644
index 000000000..c0c036292
--- /dev/null
+++ b/cpp/external/katagocoreml/src/builder/Operations.cpp
@@ -0,0 +1,31 @@
+// katagocoreml - Standalone C++ KataGo to Core ML Converter
+// Copyright (c) 2025, Chin-Chang Yang
+
+#include "Operations.hpp"
+
+namespace katagocoreml {
+
+KataGoOps::KataGoOps(int board_x_size, int board_y_size, bool optimize_identity_mask)
+  : m_board_x_size(board_x_size)
+  , m_board_y_size(board_y_size)
+  , m_optimize_identity_mask(optimize_identity_mask)
+  , m_mask_constants(board_x_size, board_y_size)
+  , m_op_counter(0) {}
+
+std::string KataGoOps::registerWeight(const std::string& name,
+                                      const std::vector<float>& data,
+                                      const std::vector<int64_t>& shape) {
+  WeightEntry entry;
+  entry.name = name;
+  entry.data = data;
+  entry.shape = shape;
+  entry.blob_offset = 0; // Will be set during serialization
+  m_weights.push_back(std::move(entry));
+  return name;
+}
+
+std::string KataGoOps::genOpName(const std::string& prefix) {
+  return prefix + "_" + std::to_string(m_op_counter++);
+}
+
+} // namespace katagocoreml
diff --git a/cpp/external/katagocoreml/src/builder/Operations.hpp b/cpp/external/katagocoreml/src/builder/Operations.hpp
new file mode 100644
index 000000000..3fc72ad88
--- /dev/null
+++ b/cpp/external/katagocoreml/src/builder/Operations.hpp
@@ -0,0 +1,77 @@
+// katagocoreml - Standalone C++ KataGo to Core ML Converter
+// Copyright (c) 2025, Chin-Chang Yang
+
+#pragma once
+
+#include "../types/KataGoTypes.hpp"
+#include <cmath>   // NOTE(review): include targets were lost in transit; <cmath> is required by std::sqrt below, the other two reconstructed — verify
+#include <string>
+#include <vector>
+
+namespace katagocoreml {
+
+/// Weight entry for blob file storage
+struct WeightEntry {
+  std::string name;
+  std::vector<float> data;
+  std::vector<int64_t> shape;
+  uint64_t blob_offset = 0; // Set during serialization
+};
+
+/// Precomputed constants for identity mask optimization
+struct MaskConstants {
+  float mask_sum = 361.0f; // 19 * 19
+  float mask_sum_reciprocal = 1.0f / 361.0f;
+  float mask_sum_sqrt_s14_m01 = 0.5f; // (sqrt(361) - 14) * 0.1
+  float mask_sum_sqrt_s14_m01_sq_s01 = 0.15f; // (0.5^2) - 0.1
+
+  MaskConstants() = default;
+
+  MaskConstants(int board_x_size, int board_y_size) {
+    mask_sum = static_cast<float>(board_x_size * board_y_size);
+    mask_sum_reciprocal = 1.0f / mask_sum;
+    float sqrt_mask_sum = std::sqrt(mask_sum);
+    mask_sum_sqrt_s14_m01 = (sqrt_mask_sum - 14.0f) * 0.1f;
+    float sq = mask_sum_sqrt_s14_m01 * mask_sum_sqrt_s14_m01;
+    mask_sum_sqrt_s14_m01_sq_s01 = sq - 0.1f;
+  }
+};
+
+/// KataGo operation builder for MIL program construction
+/// This class builds the structure needed for MIL program generation
+class KataGoOps {
+public:
+  KataGoOps(int board_x_size, int board_y_size, bool optimize_identity_mask);
+
+  /// Get the board dimensions
+  int getBoardXSize() const { return m_board_x_size; }
+  int getBoardYSize() const { return m_board_y_size; }
+  bool isOptimizeIdentityMask() const { return m_optimize_identity_mask; }
+
+  /// Get precomputed mask constants
+  const MaskConstants& getMaskConstants() const { return m_mask_constants; }
+
+  /// Register a weight tensor and return its reference name
+  std::string registerWeight(const std::string& name,
+                             const std::vector<float>& data,
+                             const std::vector<int64_t>& shape);
+
+  /// Get all registered weights
+  const std::vector<WeightEntry>& getWeights() const { return m_weights; }
+
+  /// Clear all registered weights
+  void clearWeights() { m_weights.clear(); }
+
+  /// Generate unique operation name
+  std::string genOpName(const std::string& prefix);
+
+private:
+  int m_board_x_size;
+  int m_board_y_size;
+  bool m_optimize_identity_mask;
+  MaskConstants m_mask_constants;
+  std::vector<WeightEntry> m_weights;
+  int m_op_counter = 0;
+};
+
+} // namespace katagocoreml
diff --git a/cpp/external/katagocoreml/src/parser/KataGoParser.cpp b/cpp/external/katagocoreml/src/parser/KataGoParser.cpp
new file mode 100644
index 000000000..884add38f
--- /dev/null
+++ b/cpp/external/katagocoreml/src/parser/KataGoParser.cpp
@@ -0,0 +1,573 @@
+// katagocoreml - Standalone C++ KataGo to Core ML Converter
+// Copyright (c) 2025, Chin-Chang Yang
+
+#include "KataGoParser.hpp"
+#include <algorithm> // NOTE(review): six include targets were lost in transit; these are plausible reconstructions for a file parser — verify against the original sources
+#include <cmath>
+#include <cstring>
+#include <fstream>
+#include <sstream>
+#include <stdexcept>
+
+namespace katagocoreml {
+
+// 
============================================================================ +// Constructor +// ============================================================================ + +KataGoParser::KataGoParser(const std::string& model_path) + : m_model_path(model_path) {} + +// ============================================================================ +// Version Support +// ============================================================================ + +bool KataGoParser::isVersionSupported(int version) { + for (int v : SUPPORTED_VERSIONS) { + if (v == version) return true; + } + return false; +} + +// ============================================================================ +// File Loading +// ============================================================================ + +void KataGoParser::loadFile() { + // Check if gzip compressed + bool is_gzip = false; + if (m_model_path.size() >= 3) { + std::string ext = m_model_path.substr(m_model_path.size() - 3); + is_gzip = (ext == ".gz"); + } + + if (is_gzip) { + // Read gzipped file + gzFile gz = gzopen(m_model_path.c_str(), "rb"); + if (!gz) { + throw std::runtime_error("Cannot open gzip file: " + m_model_path); + } + + // Read in chunks + m_buffer.clear(); + std::vector chunk(1024 * 1024); // 1MB chunks + int bytes_read; + while ((bytes_read = gzread(gz, chunk.data(), static_cast(chunk.size()))) > 0) { + m_buffer.insert(m_buffer.end(), chunk.begin(), chunk.begin() + bytes_read); + } + + if (bytes_read < 0) { + int errnum; + const char* errmsg = gzerror(gz, &errnum); + gzclose(gz); + throw std::runtime_error("Error reading gzip file: " + std::string(errmsg)); + } + + gzclose(gz); + } else { + // Read regular file + std::ifstream file(m_model_path, std::ios::binary | std::ios::ate); + if (!file) { + throw std::runtime_error("Cannot open file: " + m_model_path); + } + + std::streamsize size = file.tellg(); + file.seekg(0, std::ios::beg); + + m_buffer.resize(static_cast(size)); + if 
(!file.read(reinterpret_cast(m_buffer.data()), size)) { + throw std::runtime_error("Error reading file: " + m_model_path); + } + } +} + +// ============================================================================ +// Main Parse Function +// ============================================================================ + +KataGoModelDesc KataGoParser::parse() { + loadFile(); + m_pos = 0; + + // Detect if binary format (check for @BIN@ marker) + const std::string bin_marker = "@BIN@"; + auto it = std::search(m_buffer.begin(), m_buffer.end(), + bin_marker.begin(), bin_marker.end()); + m_binary_floats = (it != m_buffer.end()); + + return parseModel(); +} + +// ============================================================================ +// Low-Level Reading Functions +// ============================================================================ + +void KataGoParser::skipWhitespace() { + while (m_pos < m_buffer.size()) { + char c = static_cast(m_buffer[m_pos]); + if (c != ' ' && c != '\t' && c != '\n' && c != '\r') { + break; + } + m_pos++; + } +} + +void KataGoParser::readUntilWhitespace(std::string& out) { + out.clear(); + while (m_pos < m_buffer.size()) { + char c = static_cast(m_buffer[m_pos]); + if (c == ' ' || c == '\t' || c == '\n' || c == '\r') { + break; + } + out += c; + m_pos++; + } +} + +std::string KataGoParser::readString() { + skipWhitespace(); + std::string token; + readUntilWhitespace(token); + return token; +} + +int KataGoParser::readInt() { + std::string token = readString(); + return std::stoi(token); +} + +float KataGoParser::readFloat() { + std::string token = readString(); + return std::stof(token); +} + +bool KataGoParser::readBool() { + return readInt() != 0; +} + +std::vector KataGoParser::readFloats(size_t count, const std::string& name) { + std::vector floats(count); + + if (!m_binary_floats) { + // Text format + for (size_t i = 0; i < count; i++) { + floats[i] = readFloat(); + } + } else { + // Binary format - find @BIN@ marker + while 
(m_pos < m_buffer.size()) { + if (m_buffer[m_pos] == '@') { + break; + } + m_pos++; + } + + // Check for @BIN@ header + if (m_pos + 5 > m_buffer.size() || + std::memcmp(&m_buffer[m_pos], "@BIN@", 5) != 0) { + throw std::runtime_error(name + ": expected @BIN@ marker for binary float block"); + } + m_pos += 5; + + // Read binary floats (little-endian) + size_t num_bytes = count * 4; + if (m_pos + num_bytes > m_buffer.size()) { + throw std::runtime_error(name + ": not enough bytes for " + std::to_string(count) + " floats"); + } + + // Copy as little-endian float32 + std::memcpy(floats.data(), &m_buffer[m_pos], num_bytes); + m_pos += num_bytes; + } + + return floats; +} + +// ============================================================================ +// Layer Parsing Functions +// ============================================================================ + +ConvLayerDesc KataGoParser::parseConvLayer() { + ConvLayerDesc layer; + layer.name = readString(); + layer.conv_y_size = readInt(); + layer.conv_x_size = readInt(); + layer.in_channels = readInt(); + layer.out_channels = readInt(); + layer.dilation_y = readInt(); + layer.dilation_x = readInt(); + + // Read weights in file order: [y, x, ic, oc] + size_t num_weights = static_cast(layer.conv_y_size) * layer.conv_x_size * + layer.in_channels * layer.out_channels; + std::vector weights_flat = readFloats(num_weights, layer.name); + + // Transpose from [y, x, ic, oc] to [oc, ic, y, x] + layer.weights.resize(num_weights); + int y_size = layer.conv_y_size; + int x_size = layer.conv_x_size; + int ic = layer.in_channels; + int oc = layer.out_channels; + + for (int out_c = 0; out_c < oc; out_c++) { + for (int in_c = 0; in_c < ic; in_c++) { + for (int y = 0; y < y_size; y++) { + for (int x = 0; x < x_size; x++) { + // Source index: [y, x, ic, oc] + size_t src_idx = static_cast(y) * x_size * ic * oc + + x * ic * oc + + in_c * oc + + out_c; + // Dest index: [oc, ic, y, x] + size_t dst_idx = static_cast(out_c) * ic * y_size * 
x_size + + in_c * y_size * x_size + + y * x_size + + x; + layer.weights[dst_idx] = weights_flat[src_idx]; + } + } + } + } + + return layer; +} + +BatchNormLayerDesc KataGoParser::parseBatchNormLayer() { + BatchNormLayerDesc layer; + layer.name = readString(); + layer.num_channels = readInt(); + layer.epsilon = readFloat(); + layer.has_scale = readBool(); + layer.has_bias = readBool(); + + layer.mean = readFloats(layer.num_channels, layer.name + "/mean"); + layer.variance = readFloats(layer.num_channels, layer.name + "/variance"); + + if (layer.has_scale) { + layer.scale = readFloats(layer.num_channels, layer.name + "/scale"); + } else { + layer.scale.resize(layer.num_channels, 1.0f); + } + + if (layer.has_bias) { + layer.bias = readFloats(layer.num_channels, layer.name + "/bias"); + } else { + layer.bias.resize(layer.num_channels, 0.0f); + } + + // Compute merged scale and bias + layer.merged_scale.resize(layer.num_channels); + layer.merged_bias.resize(layer.num_channels); + for (int i = 0; i < layer.num_channels; i++) { + layer.merged_scale[i] = layer.scale[i] / std::sqrt(layer.variance[i] + layer.epsilon); + layer.merged_bias[i] = layer.bias[i] - layer.merged_scale[i] * layer.mean[i]; + } + + return layer; +} + +ActivationLayerDesc KataGoParser::parseActivationLayer(int model_version) { + ActivationLayerDesc layer; + layer.name = readString(); + + if (model_version >= 11) { + std::string activation_str = readString(); + if (activation_str == "ACTIVATION_IDENTITY") { + layer.activation_type = ActivationType::Identity; + } else if (activation_str == "ACTIVATION_RELU") { + layer.activation_type = ActivationType::ReLU; + } else if (activation_str == "ACTIVATION_MISH") { + layer.activation_type = ActivationType::Mish; + } else { + throw std::runtime_error("Unknown activation type: " + activation_str); + } + } else { + // Pre-v11 models only have ReLU + layer.activation_type = ActivationType::ReLU; + } + + return layer; +} + +MatMulLayerDesc 
KataGoParser::parseMatMulLayer() { + MatMulLayerDesc layer; + layer.name = readString(); + layer.in_channels = readInt(); + layer.out_channels = readInt(); + + // Weights in [ic, oc] order + size_t num_weights = static_cast(layer.in_channels) * layer.out_channels; + layer.weights = readFloats(num_weights, layer.name); + + return layer; +} + +MatBiasLayerDesc KataGoParser::parseMatBiasLayer() { + MatBiasLayerDesc layer; + layer.name = readString(); + layer.num_channels = readInt(); + layer.weights = readFloats(layer.num_channels, layer.name); + + return layer; +} + +// ============================================================================ +// Block Parsing Functions +// ============================================================================ + +ResidualBlockDesc KataGoParser::parseResidualBlock(int model_version) { + ResidualBlockDesc block; + block.name = readString(); + block.pre_bn = parseBatchNormLayer(); + block.pre_activation = parseActivationLayer(model_version); + block.regular_conv = parseConvLayer(); + block.mid_bn = parseBatchNormLayer(); + block.mid_activation = parseActivationLayer(model_version); + block.final_conv = parseConvLayer(); + + return block; +} + +GlobalPoolingResidualBlockDesc KataGoParser::parseGlobalPoolingResidualBlock(int model_version) { + GlobalPoolingResidualBlockDesc block; + block.name = readString(); + block.model_version = model_version; + block.pre_bn = parseBatchNormLayer(); + block.pre_activation = parseActivationLayer(model_version); + block.regular_conv = parseConvLayer(); + block.gpool_conv = parseConvLayer(); + block.gpool_bn = parseBatchNormLayer(); + block.gpool_activation = parseActivationLayer(model_version); + block.gpool_to_bias_mul = parseMatMulLayer(); + block.mid_bn = parseBatchNormLayer(); + block.mid_activation = parseActivationLayer(model_version); + block.final_conv = parseConvLayer(); + + return block; +} + +NestedBottleneckResidualBlockDesc KataGoParser::parseNestedBottleneckBlock(int 
model_version, int trunk_num_channels) { + NestedBottleneckResidualBlockDesc block; + block.name = readString(); + block.num_blocks = readInt(); + + block.pre_bn = parseBatchNormLayer(); + block.pre_activation = parseActivationLayer(model_version); + block.pre_conv = parseConvLayer(); + + block.blocks = parseBlockStack(model_version, block.num_blocks, block.pre_conv.out_channels); + + block.post_bn = parseBatchNormLayer(); + block.post_activation = parseActivationLayer(model_version); + block.post_conv = parseConvLayer(); + + return block; +} + +std::vector KataGoParser::parseBlockStack(int model_version, int num_blocks, int trunk_num_channels) { + std::vector blocks; + blocks.reserve(num_blocks); + + for (int i = 0; i < num_blocks; i++) { + std::string block_kind_name = readString(); + BlockEntry entry; + + if (block_kind_name == "ordinary_block") { + entry.block_kind = ORDINARY_BLOCK_KIND; + entry.block = std::make_shared(parseResidualBlock(model_version)); + } else if (block_kind_name == "gpool_block") { + entry.block_kind = GLOBAL_POOLING_BLOCK_KIND; + entry.block = std::make_shared(parseGlobalPoolingResidualBlock(model_version)); + } else if (block_kind_name == "nested_bottleneck_block") { + entry.block_kind = NESTED_BOTTLENECK_BLOCK_KIND; + entry.block = std::make_shared(parseNestedBottleneckBlock(model_version, trunk_num_channels)); + } else { + throw std::runtime_error("Unknown block kind: " + block_kind_name); + } + + blocks.push_back(std::move(entry)); + } + + return blocks; +} + +// ============================================================================ +// Component Parsing Functions +// ============================================================================ + +SGFMetadataEncoderDesc KataGoParser::parseSGFMetadataEncoder(int model_version, int meta_encoder_version) { + SGFMetadataEncoderDesc encoder; + encoder.name = readString(); + encoder.meta_encoder_version = meta_encoder_version; + encoder.num_input_meta_channels = readInt(); + + 
encoder.mul1 = parseMatMulLayer(); + encoder.bias1 = parseMatBiasLayer(); + encoder.act1 = parseActivationLayer(model_version); + encoder.mul2 = parseMatMulLayer(); + encoder.bias2 = parseMatBiasLayer(); + encoder.act2 = parseActivationLayer(model_version); + encoder.mul3 = parseMatMulLayer(); + + return encoder; +} + +TrunkDesc KataGoParser::parseTrunk(int model_version, int meta_encoder_version) { + TrunkDesc trunk; + trunk.name = readString(); + trunk.model_version = model_version; + trunk.meta_encoder_version = meta_encoder_version; + trunk.num_blocks = readInt(); + trunk.trunk_num_channels = readInt(); + trunk.mid_num_channels = readInt(); + trunk.regular_num_channels = readInt(); + readInt(); // dilatedNumChannels (unused) + trunk.gpool_num_channels = readInt(); + + // Version >= 15 has 6 unused int parameters + if (model_version >= 15) { + for (int i = 0; i < 6; i++) { + readInt(); + } + } + + trunk.initial_conv = parseConvLayer(); + trunk.initial_matmul = parseMatMulLayer(); + + // Parse SGF metadata encoder if present + if (meta_encoder_version > 0) { + trunk.sgf_metadata_encoder = parseSGFMetadataEncoder(model_version, meta_encoder_version); + } + + // Parse residual blocks + trunk.blocks = parseBlockStack(model_version, trunk.num_blocks, trunk.trunk_num_channels); + + trunk.trunk_tip_bn = parseBatchNormLayer(); + trunk.trunk_tip_activation = parseActivationLayer(model_version); + + return trunk; +} + +PolicyHeadDesc KataGoParser::parsePolicyHead(int model_version) { + PolicyHeadDesc head; + head.name = readString(); + head.model_version = model_version; + + head.p1_conv = parseConvLayer(); + head.g1_conv = parseConvLayer(); + head.g1_bn = parseBatchNormLayer(); + head.g1_activation = parseActivationLayer(model_version); + head.gpool_to_bias_mul = parseMatMulLayer(); + head.p1_bn = parseBatchNormLayer(); + head.p1_activation = parseActivationLayer(model_version); + head.p2_conv = parseConvLayer(); + head.gpool_to_pass_mul = parseMatMulLayer(); + + // 
Version >= 15 has additional pass move layers + if (model_version >= 15) { + head.gpool_to_pass_bias = parseMatBiasLayer(); + head.pass_activation = parseActivationLayer(model_version); + head.gpool_to_pass_mul2 = parseMatMulLayer(); + } + + // Determine policy output channels based on version + if (model_version >= 16) { + head.policy_out_channels = 4; + } else if (model_version >= 12) { + head.policy_out_channels = 2; + } else { + head.policy_out_channels = 1; + } + + return head; +} + +ValueHeadDesc KataGoParser::parseValueHead(int model_version) { + ValueHeadDesc head; + head.name = readString(); + head.model_version = model_version; + + head.v1_conv = parseConvLayer(); + head.v1_bn = parseBatchNormLayer(); + head.v1_activation = parseActivationLayer(model_version); + head.v2_mul = parseMatMulLayer(); + head.v2_bias = parseMatBiasLayer(); + head.v2_activation = parseActivationLayer(model_version); + head.v3_mul = parseMatMulLayer(); + head.v3_bias = parseMatBiasLayer(); + head.sv3_mul = parseMatMulLayer(); + head.sv3_bias = parseMatBiasLayer(); + head.v_ownership_conv = parseConvLayer(); + + return head; +} + +// ============================================================================ +// Main Model Parsing +// ============================================================================ + +KataGoModelDesc KataGoParser::parseModel() { + KataGoModelDesc model; + + // Read header + model.name = readString(); + model.model_version = readInt(); + + if (!isVersionSupported(model.model_version)) { + throw std::runtime_error( + "Only KataGo model versions 8-16 are supported, got version " + + std::to_string(model.model_version)); + } + + model.num_input_channels = readInt(); + model.num_input_global_channels = readInt(); + + // Parse post-process params (version >= 13) + if (model.model_version >= 13) { + model.post_process_params.td_score_multiplier = readFloat(); + model.post_process_params.score_mean_multiplier = readFloat(); + 
model.post_process_params.score_stdev_multiplier = readFloat(); + model.post_process_params.lead_multiplier = readFloat(); + model.post_process_params.variance_time_multiplier = readFloat(); + model.post_process_params.shortterm_value_error_multiplier = readFloat(); + model.post_process_params.shortterm_score_error_multiplier = readFloat(); + } + + // Parse meta encoder version (version >= 15) + model.meta_encoder_version = 0; + model.num_input_meta_channels = 0; + if (model.model_version >= 15) { + model.meta_encoder_version = readInt(); + // Read unused params + for (int i = 0; i < 7; i++) { + readInt(); + } + + if (model.meta_encoder_version > 0) { + model.num_input_meta_channels = 192; // SGFMetadata::METADATA_INPUT_NUM_CHANNELS + } + } + + // Parse trunk, policy head, value head + model.trunk = parseTrunk(model.model_version, model.meta_encoder_version); + model.policy_head = parsePolicyHead(model.model_version); + model.value_head = parseValueHead(model.model_version); + + // Determine output channel counts + model.num_policy_channels = model.policy_head.policy_out_channels; + model.num_value_channels = 3; // win, loss, noresult + + if (model.model_version >= 9) { + model.num_score_value_channels = 6; + } else if (model.model_version >= 8) { + model.num_score_value_channels = 4; + } else { + model.num_score_value_channels = 1; + } + + model.num_ownership_channels = 1; + + return model; +} + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/parser/KataGoParser.hpp b/cpp/external/katagocoreml/src/parser/KataGoParser.hpp new file mode 100644 index 000000000..cbcfdefa8 --- /dev/null +++ b/cpp/external/katagocoreml/src/parser/KataGoParser.hpp @@ -0,0 +1,73 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include "../types/KataGoTypes.hpp" +#include +#include +#include + +namespace katagocoreml { + +/// Parser for KataGo neural network model files. 
+/// Supports versions 8-16 models in binary format (.bin, .bin.gz). +class KataGoParser { +public: + /// Supported KataGo model versions + static constexpr std::array SUPPORTED_VERSIONS = {8, 9, 10, 11, 12, 13, 14, 15, 16}; + + /// Constructor + /// @param model_path Path to the KataGo model file (.bin or .bin.gz) + explicit KataGoParser(const std::string& model_path); + + /// Parse the model file and return a structured model description + /// @return KataGoModelDesc containing all model parameters + /// @throws std::runtime_error if the file cannot be read or parsed + KataGoModelDesc parse(); + + /// Check if a version is supported + static bool isVersionSupported(int version); + +private: + std::string m_model_path; + std::vector m_buffer; + size_t m_pos = 0; + bool m_binary_floats = true; + + // Low-level reading functions + void readUntilWhitespace(std::string& out); + void skipWhitespace(); + std::string readString(); + int readInt(); + float readFloat(); + bool readBool(); + std::vector readFloats(size_t count, const std::string& name); + + // Layer parsing functions + ConvLayerDesc parseConvLayer(); + BatchNormLayerDesc parseBatchNormLayer(); + ActivationLayerDesc parseActivationLayer(int model_version); + MatMulLayerDesc parseMatMulLayer(); + MatBiasLayerDesc parseMatBiasLayer(); + + // Block parsing functions + ResidualBlockDesc parseResidualBlock(int model_version); + GlobalPoolingResidualBlockDesc parseGlobalPoolingResidualBlock(int model_version); + NestedBottleneckResidualBlockDesc parseNestedBottleneckBlock(int model_version, int trunk_num_channels); + std::vector parseBlockStack(int model_version, int num_blocks, int trunk_num_channels); + + // Component parsing functions + SGFMetadataEncoderDesc parseSGFMetadataEncoder(int model_version, int meta_encoder_version); + TrunkDesc parseTrunk(int model_version, int meta_encoder_version); + PolicyHeadDesc parsePolicyHead(int model_version); + ValueHeadDesc parseValueHead(int model_version); + + // Main 
model parsing + KataGoModelDesc parseModel(); + + // Helper to load file (handles gzip) + void loadFile(); +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.cpp b/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.cpp new file mode 100644 index 000000000..f271f5526 --- /dev/null +++ b/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.cpp @@ -0,0 +1,289 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#include "CoreMLSerializer.hpp" +#include "WeightSerializer.hpp" +#include "katagocoreml/Version.hpp" +#include "MIL.pb.h" +#include "Model.pb.h" +#include "FeatureTypes.pb.h" +#include "ModelPackage.hpp" +#include +#include +#include +#include + +namespace katagocoreml { + +CoreMLSerializer::CoreMLSerializer(int spec_version) + : m_spec_version(spec_version) {} + +void CoreMLSerializer::serialize(CoreML::Specification::MILSpec::Program* program, + std::vector& weights, + const std::string& output_path, + const ConversionOptions& options) { + // Create temporary directory for weights + std::filesystem::path temp_dir = std::filesystem::temp_directory_path() / "katagocoreml_weights"; + std::filesystem::create_directories(temp_dir); + std::string weights_dir = temp_dir.string(); + + // Determine if using FP16 precision + bool use_fp16 = (options.compute_precision == "FLOAT16"); + bool use_fp16_io = use_fp16 && options.use_fp16_io; + + // Write weight blob (this sets blob_offset on each WeightEntry) + writeWeightBlob(weights_dir, weights, use_fp16); + + // Update MIL program with calculated blob offsets + updateBlobOffsets(program, weights); + + // Create Model spec wrapping the MIL program + auto model = createModelSpec(program, options); + + // Create .mlpackage + createPackage(output_path, model.get(), weights_dir); + + // Cleanup temp directory + std::filesystem::remove_all(temp_dir); +} + +std::unique_ptr CoreMLSerializer::createModelSpec( 
+ CoreML::Specification::MILSpec::Program* program, + const ConversionOptions& options) { + + auto model = std::make_unique(); + model->set_specificationversion(m_spec_version); + + // Set description + auto* desc = model->mutable_description(); + + // Helper lambda to set up batch dimension (either fixed shape or shape range) + auto setBatchShape = [&options](CoreML::Specification::ArrayFeatureType* array_type, + std::vector other_dims) { + if (options.isDynamicBatch()) { + // Use ShapeRange for dynamic batch + auto* shape_range = array_type->mutable_shaperange(); + + // Batch dimension range + auto* batch_range = shape_range->add_sizeranges(); + batch_range->set_lowerbound(options.min_batch_size); + batch_range->set_upperbound(options.max_batch_size); + + // Other dimensions are fixed + for (int64_t dim : other_dims) { + auto* range = shape_range->add_sizeranges(); + range->set_lowerbound(dim); + range->set_upperbound(dim); + } + + // Also set default shape for batch=min_batch_size + array_type->add_shape(options.min_batch_size); + for (int64_t dim : other_dims) { + array_type->add_shape(dim); + } + } else { + // Fixed batch size + array_type->add_shape(options.min_batch_size); + for (int64_t dim : other_dims) { + array_type->add_shape(dim); + } + } + }; + + // Determine data type for inputs/outputs + auto io_datatype = (options.compute_precision == "FLOAT16" && options.use_fp16_io) + ? 
CoreML::Specification::ArrayFeatureType::FLOAT16 + : CoreML::Specification::ArrayFeatureType::FLOAT32; + + // Add input descriptions + // spatial_input: [batch, num_input_channels, board_y, board_x] + auto* spatial_input = desc->add_input(); + spatial_input->set_name("spatial_input"); + auto* spatial_type = spatial_input->mutable_type()->mutable_multiarraytype(); + spatial_type->set_datatype(io_datatype); + setBatchShape(spatial_type, {options.num_input_channels, options.board_y_size, options.board_x_size}); + + // global_input: [batch, num_input_global_channels] + auto* global_input = desc->add_input(); + global_input->set_name("global_input"); + auto* global_type = global_input->mutable_type()->mutable_multiarraytype(); + global_type->set_datatype(io_datatype); + setBatchShape(global_type, {options.num_input_global_channels}); + + // input_mask: [batch, 1, board_y, board_x] + auto* mask_input = desc->add_input(); + mask_input->set_name("input_mask"); + auto* mask_type = mask_input->mutable_type()->mutable_multiarraytype(); + mask_type->set_datatype(io_datatype); + setBatchShape(mask_type, {1, options.board_y_size, options.board_x_size}); + + // meta_input (optional, for human SL networks with metadata encoder): [batch, num_meta_channels] + if (options.meta_encoder_version > 0 && options.num_input_meta_channels > 0) { + auto* meta_input = desc->add_input(); + meta_input->set_name("meta_input"); + auto* meta_type = meta_input->mutable_type()->mutable_multiarraytype(); + meta_type->set_datatype(io_datatype); + setBatchShape(meta_type, {options.num_input_meta_channels}); + } + + // Add output descriptions (names match Python coremltools converter) + auto* policy_output = desc->add_output(); + policy_output->set_name("policy_p2_conv"); + auto* policy_type = policy_output->mutable_type()->mutable_multiarraytype(); + policy_type->set_datatype(io_datatype); + + auto* pass_output = desc->add_output(); + // Pass output name: Python uses "policy_pass" for all model versions 
+ pass_output->set_name("policy_pass"); + auto* pass_type = pass_output->mutable_type()->mutable_multiarraytype(); + pass_type->set_datatype(io_datatype); + + auto* value_output = desc->add_output(); + value_output->set_name("value_v3_bias"); + auto* value_type = value_output->mutable_type()->mutable_multiarraytype(); + value_type->set_datatype(io_datatype); + + auto* ownership_output = desc->add_output(); + ownership_output->set_name("value_ownership_conv"); + auto* ownership_type = ownership_output->mutable_type()->mutable_multiarraytype(); + ownership_type->set_datatype(io_datatype); + + auto* score_output = desc->add_output(); + score_output->set_name("value_sv3_bias"); + auto* score_type = score_output->mutable_type()->mutable_multiarraytype(); + score_type->set_datatype(io_datatype); + + // Set metadata + auto* metadata = desc->mutable_metadata(); + + // Build enhanced description: "KataGo - 10 blocks, 128 channels (from model.bin.gz)" + std::string description = "KataGo"; + if (options.num_blocks > 0 && options.trunk_channels > 0) { + description += " - " + std::to_string(options.num_blocks) + " blocks, " + + std::to_string(options.trunk_channels) + " channels"; + } else { + description += " neural network model"; + } + if (!options.source_filename.empty()) { + description += " (from " + options.source_filename + ")"; + } + metadata->set_shortdescription(description); + + // Set author if provided + if (!options.author.empty()) { + metadata->set_author(options.author); + } + + // Set license if provided + if (!options.license.empty()) { + metadata->set_license(options.license); + } + + // Set version string to model name + if (!options.model_name.empty()) { + metadata->set_versionstring(options.model_name); + } + + // User-defined metadata + auto& user_meta = *metadata->mutable_userdefined(); + user_meta["board_x_size"] = std::to_string(options.board_x_size); + user_meta["board_y_size"] = std::to_string(options.board_y_size); + user_meta["converter"] = 
"katagocoreml"; + user_meta["converter_version"] = VERSION; + + // Model info + user_meta["model_version"] = std::to_string(options.model_version); + if (options.meta_encoder_version > 0) { + user_meta["meta_encoder_version"] = std::to_string(options.meta_encoder_version); + } + user_meta["optimize_identity_mask"] = options.optimize_identity_mask ? "true" : "false"; + + // Precision info + user_meta["compute_precision"] = options.compute_precision; + user_meta["io_precision"] = options.use_fp16_io ? "FLOAT16" : "FLOAT32"; + + // Set the MIL program (use Swap to transfer ownership) + auto* ml_program = model->mutable_mlprogram(); + ml_program->Swap(program); + + return model; +} + +void CoreMLSerializer::writeWeightBlob(const std::string& weights_dir, + std::vector& weights, + bool use_fp16) { + std::filesystem::create_directories(weights_dir); + std::string blob_path = weights_dir + "/weight.bin"; + WeightSerializer::serialize(weights, blob_path, use_fp16); +} + +void CoreMLSerializer::createPackage(const std::string& output_path, + CoreML::Specification::Model* model, + const std::string& weights_dir) { + // Create package using MPL::ModelPackage + MPL::ModelPackage package(output_path, true, false); + + // Serialize model spec to temp file + std::filesystem::path temp_spec = std::filesystem::temp_directory_path() / "model.mlmodel"; + { + std::ofstream out(temp_spec, std::ios::binary); + if (!out) { + throw std::runtime_error("Failed to create temp model file"); + } + if (!model->SerializeToOstream(&out)) { + throw std::runtime_error("Failed to serialize model spec"); + } + } + + // Set root model + package.setRootModel(temp_spec.string(), "model.mlmodel", "com.apple.CoreML", "KataGo Core ML Model"); + + // Add weights + package.addItem(weights_dir, "weights", "com.apple.CoreML", "Model Weights"); + + // Cleanup temp file + std::filesystem::remove(temp_spec); +} + +void CoreMLSerializer::updateBlobOffsets(CoreML::Specification::MILSpec::Program* program, + const 
std::vector& weights) { + // Build a map from weight name to blob offset + std::unordered_map offset_map; + for (const auto& entry : weights) { + offset_map[entry.name] = entry.blob_offset; + } + + // Navigate through MIL program structure to find all blobfilevalue entries + // Structure: Program -> functions -> blocks -> operations -> attributes["val"] + for (auto& func_pair : *program->mutable_functions()) { + auto& func = func_pair.second; + for (auto& block_pair : *func.mutable_block_specializations()) { + auto& block = block_pair.second; + for (int op_idx = 0; op_idx < block.operations_size(); ++op_idx) { + auto* op = block.mutable_operations(op_idx); + // Check if this is a const operation + if (op->type() == "const") { + // Get the "val" attribute + auto* attrs = op->mutable_attributes(); + auto val_it = attrs->find("val"); + if (val_it != attrs->end()) { + auto& val = val_it->second; + // Check if it's a blobfilevalue + if (val.has_blobfilevalue()) { + // Get the output name to look up the offset + if (op->outputs_size() > 0) { + const std::string& output_name = op->outputs(0).name(); + auto offset_it = offset_map.find(output_name); + if (offset_it != offset_map.end()) { + val.mutable_blobfilevalue()->set_offset(offset_it->second); + } + } + } + } + } + } + } + } +} + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.hpp b/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.hpp new file mode 100644 index 000000000..2828839d3 --- /dev/null +++ b/cpp/external/katagocoreml/src/serializer/CoreMLSerializer.hpp @@ -0,0 +1,54 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include "../builder/MILBuilder.hpp" +#include "katagocoreml/Options.hpp" +#include "Model.pb.h" +#include +#include + +namespace katagocoreml { + +/// Serializes MIL program to Core ML .mlpackage format +class CoreMLSerializer { +public: + /// Constructor + /// 
@param spec_version Core ML specification version (default: 6 for iOS 15+) + explicit CoreMLSerializer(int spec_version = 6); + + /// Serialize MIL program to .mlpackage + /// @param program The MIL program protobuf + /// @param weights Weight entries for blob serialization + /// @param output_path Path for .mlpackage directory + /// @param options Conversion options for metadata + void serialize(CoreML::Specification::MILSpec::Program* program, + std::vector& weights, + const std::string& output_path, + const ConversionOptions& options); + +private: + int m_spec_version; + + /// Create the top-level Model protobuf wrapping the MIL program + std::unique_ptr createModelSpec( + CoreML::Specification::MILSpec::Program* program, + const ConversionOptions& options); + + /// Write weight blob file + void writeWeightBlob(const std::string& weights_dir, + std::vector& weights, + bool use_fp16); + + /// Create .mlpackage directory structure + void createPackage(const std::string& output_path, + CoreML::Specification::Model* model, + const std::string& weights_dir); + + /// Update blob offsets in MIL program after weights are serialized + void updateBlobOffsets(CoreML::Specification::MILSpec::Program* program, + const std::vector& weights); +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/serializer/WeightSerializer.cpp b/cpp/external/katagocoreml/src/serializer/WeightSerializer.cpp new file mode 100644 index 000000000..2ac23a3da --- /dev/null +++ b/cpp/external/katagocoreml/src/serializer/WeightSerializer.cpp @@ -0,0 +1,38 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#include "WeightSerializer.hpp" +#include "MILBlob/Blob/StorageWriter.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Util/Span.hpp" + +namespace katagocoreml { + +size_t WeightSerializer::serialize(std::vector& weights, + const std::string& blob_path, + bool use_fp16) { + MILBlob::Blob::StorageWriter 
writer(blob_path, true); + size_t total_bytes = 0; + + for (auto& entry : weights) { + if (use_fp16) { + // Convert FP32 weights to FP16 + std::vector fp16_data(entry.data.size()); + for (size_t i = 0; i < entry.data.size(); ++i) { + fp16_data[i] = MILBlob::Fp16::FromFloat(entry.data[i]); + } + MILBlob::Util::Span span(fp16_data.data(), fp16_data.size()); + entry.blob_offset = writer.WriteData(span); + total_bytes += entry.data.size() * sizeof(MILBlob::Fp16); + } else { + // Write FP32 weights + MILBlob::Util::Span span(entry.data.data(), entry.data.size()); + entry.blob_offset = writer.WriteData(span); + total_bytes += entry.data.size() * sizeof(float); + } + } + + return total_bytes; +} + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/serializer/WeightSerializer.hpp b/cpp/external/katagocoreml/src/serializer/WeightSerializer.hpp new file mode 100644 index 000000000..e561ff442 --- /dev/null +++ b/cpp/external/katagocoreml/src/serializer/WeightSerializer.hpp @@ -0,0 +1,25 @@ +// katagocoreml - Standalone C++ KataGo to Core ML Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include "../builder/Operations.hpp" +#include +#include + +namespace katagocoreml { + +/// Serializes model weights to MIL blob storage format +class WeightSerializer { +public: + /// Write weights to blob file + /// @param weights Vector of weight entries to serialize + /// @param blob_path Path to output blob file + /// @param use_fp16 If true, convert weights to FLOAT16 + /// @return Total bytes written + static size_t serialize(std::vector& weights, + const std::string& blob_path, + bool use_fp16 = false); +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/src/types/KataGoTypes.hpp b/cpp/external/katagocoreml/src/types/KataGoTypes.hpp new file mode 100644 index 000000000..284b26cd3 --- /dev/null +++ b/cpp/external/katagocoreml/src/types/KataGoTypes.hpp @@ -0,0 +1,297 @@ +// katagocoreml - Standalone C++ KataGo to Core ML 
Converter +// Copyright (c) 2025, Chin-Chang Yang + +#pragma once + +#include +#include +#include +#include +#include + +namespace katagocoreml { + +// ============================================================================ +// Activation Types +// ============================================================================ + +/// Activation function types used in KataGo models +enum class ActivationType : int { + Identity = 0, + ReLU = 1, + Mish = 2 + // MISH_SCALE8 = 12 is internal optimization, treated as Mish +}; + +// ============================================================================ +// Block Kind Constants +// ============================================================================ + +/// Block kind constants (matching KataGo's desc.h) +constexpr int ORDINARY_BLOCK_KIND = 0; +constexpr int GLOBAL_POOLING_BLOCK_KIND = 2; +constexpr int NESTED_BOTTLENECK_BLOCK_KIND = 3; + +// ============================================================================ +// Layer Descriptors +// ============================================================================ + +/// Convolutional layer descriptor +struct ConvLayerDesc { + std::string name; + int conv_y_size = 0; + int conv_x_size = 0; + int in_channels = 0; + int out_channels = 0; + int dilation_y = 1; + int dilation_x = 1; + std::vector weights; // Shape: [out_channels, in_channels, y, x] (OIHW) + + /// Get weight shape as vector + std::vector getWeightShape() const { + return {out_channels, in_channels, conv_y_size, conv_x_size}; + } +}; + +/// Batch normalization layer descriptor +/// KataGo pre-computes merged scale and bias for efficiency: +/// merged_scale = scale / sqrt(variance + epsilon) +/// merged_bias = bias - mean * merged_scale +struct BatchNormLayerDesc { + std::string name; + int num_channels = 0; + float epsilon = 1e-5f; + bool has_scale = true; + bool has_bias = true; + std::vector mean; + std::vector variance; + std::vector scale; + std::vector bias; + std::vector merged_scale; 
// Pre-computed + std::vector merged_bias; // Pre-computed +}; + +/// Activation layer descriptor +struct ActivationLayerDesc { + std::string name; + ActivationType activation_type = ActivationType::ReLU; +}; + +/// Matrix multiplication (fully connected) layer descriptor +/// Computes: output = input @ weights +struct MatMulLayerDesc { + std::string name; + int in_channels = 0; + int out_channels = 0; + std::vector weights; // Shape: [in_channels, out_channels] + + std::vector getWeightShape() const { + return {in_channels, out_channels}; + } +}; + +/// Bias addition layer descriptor +/// Computes: output = input + bias +struct MatBiasLayerDesc { + std::string name; + int num_channels = 0; + std::vector weights; // Shape: [num_channels] +}; + +// ============================================================================ +// Block Descriptors +// ============================================================================ + +/// Forward declarations for recursive block types +struct ResidualBlockDesc; +struct GlobalPoolingResidualBlockDesc; +struct NestedBottleneckResidualBlockDesc; + +/// Block descriptor variant +using BlockDesc = std::variant< + ResidualBlockDesc, + GlobalPoolingResidualBlockDesc, + NestedBottleneckResidualBlockDesc +>; + +/// Block with its kind +struct BlockEntry { + int block_kind = ORDINARY_BLOCK_KIND; + std::shared_ptr block; +}; + +/// Standard residual block descriptor +/// Architecture: +/// input -> preBN -> preActivation -> regularConv -> +/// midBN -> midActivation -> finalConv -> + input +struct ResidualBlockDesc { + std::string name; + BatchNormLayerDesc pre_bn; + ActivationLayerDesc pre_activation; + ConvLayerDesc regular_conv; + BatchNormLayerDesc mid_bn; + ActivationLayerDesc mid_activation; + ConvLayerDesc final_conv; +}; + +/// Global pooling residual block descriptor +/// Similar to ResidualBlock but includes a global pooling path +struct GlobalPoolingResidualBlockDesc { + std::string name; + int model_version = 0; + 
BatchNormLayerDesc pre_bn; + ActivationLayerDesc pre_activation; + ConvLayerDesc regular_conv; + ConvLayerDesc gpool_conv; + BatchNormLayerDesc gpool_bn; + ActivationLayerDesc gpool_activation; + MatMulLayerDesc gpool_to_bias_mul; + BatchNormLayerDesc mid_bn; + ActivationLayerDesc mid_activation; + ConvLayerDesc final_conv; +}; + +/// Nested bottleneck residual block descriptor +/// A bottleneck block that can contain other blocks inside it +struct NestedBottleneckResidualBlockDesc { + std::string name; + int num_blocks = 0; + BatchNormLayerDesc pre_bn; + ActivationLayerDesc pre_activation; + ConvLayerDesc pre_conv; + std::vector blocks; + BatchNormLayerDesc post_bn; + ActivationLayerDesc post_activation; + ConvLayerDesc post_conv; +}; + +// ============================================================================ +// SGF Metadata Encoder (v15+) +// ============================================================================ + +/// SGF metadata encoder descriptor (model version >= 15) +/// Encodes game metadata through a 3-layer MLP +struct SGFMetadataEncoderDesc { + std::string name; + int meta_encoder_version = 0; + int num_input_meta_channels = 0; + MatMulLayerDesc mul1; + MatBiasLayerDesc bias1; + ActivationLayerDesc act1; + MatMulLayerDesc mul2; + MatBiasLayerDesc bias2; + ActivationLayerDesc act2; + MatMulLayerDesc mul3; +}; + +// ============================================================================ +// Network Component Descriptors +// ============================================================================ + +/// Trunk (backbone) network descriptor +struct TrunkDesc { + std::string name; + int model_version = 0; + int num_blocks = 0; + int trunk_num_channels = 0; + int mid_num_channels = 0; + int regular_num_channels = 0; + int gpool_num_channels = 0; + int meta_encoder_version = 0; + ConvLayerDesc initial_conv; + MatMulLayerDesc initial_matmul; + std::optional sgf_metadata_encoder; + std::vector blocks; + BatchNormLayerDesc trunk_tip_bn; + 
ActivationLayerDesc trunk_tip_activation; +}; + +/// Policy head descriptor +struct PolicyHeadDesc { + std::string name; + int model_version = 0; + int policy_out_channels = 0; + ConvLayerDesc p1_conv; + ConvLayerDesc g1_conv; + BatchNormLayerDesc g1_bn; + ActivationLayerDesc g1_activation; + MatMulLayerDesc gpool_to_bias_mul; + BatchNormLayerDesc p1_bn; + ActivationLayerDesc p1_activation; + ConvLayerDesc p2_conv; + MatMulLayerDesc gpool_to_pass_mul; + std::optional gpool_to_pass_bias; // v15+ + std::optional pass_activation; // v15+ + std::optional gpool_to_pass_mul2; // v15+ +}; + +/// Value head descriptor +struct ValueHeadDesc { + std::string name; + int model_version = 0; + ConvLayerDesc v1_conv; + BatchNormLayerDesc v1_bn; + ActivationLayerDesc v1_activation; + MatMulLayerDesc v2_mul; + MatBiasLayerDesc v2_bias; + ActivationLayerDesc v2_activation; + MatMulLayerDesc v3_mul; + MatBiasLayerDesc v3_bias; + MatMulLayerDesc sv3_mul; + MatBiasLayerDesc sv3_bias; + ConvLayerDesc v_ownership_conv; +}; + +// ============================================================================ +// Post-Processing Parameters +// ============================================================================ + +/// Post-processing parameters for model outputs (v13+) +struct ModelPostProcessParams { + float td_score_multiplier = 20.0f; + float score_mean_multiplier = 20.0f; + float score_stdev_multiplier = 20.0f; + float lead_multiplier = 20.0f; + float variance_time_multiplier = 40.0f; + float shortterm_value_error_multiplier = 0.25f; + float shortterm_score_error_multiplier = 30.0f; + float output_scale_multiplier = 1.0f; +}; + +// ============================================================================ +// Complete Model Descriptor +// ============================================================================ + +/// Complete KataGo model descriptor +struct KataGoModelDesc { + std::string name; + std::string sha256; + int model_version = 0; + int num_input_channels = 0; + 
int num_input_global_channels = 0; + int num_input_meta_channels = 0; + int num_policy_channels = 0; + int num_value_channels = 3; // Always 3: win/loss/noresult + int num_score_value_channels = 0; + int num_ownership_channels = 1; // Always 1 + int meta_encoder_version = 0; + ModelPostProcessParams post_process_params; + TrunkDesc trunk; + PolicyHeadDesc policy_head; + ValueHeadDesc value_head; + + /// Get number of policy channels based on model version + static int getPolicyChannels(int version) { + if (version >= 16) return 4; + if (version >= 12) return 2; + return 1; + } + + /// Get number of score value channels based on model version + static int getScoreValueChannels(int version) { + if (version >= 9) return 6; + return 4; + } +}; + +} // namespace katagocoreml diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/LICENSE b/cpp/external/katagocoreml/vendor/deps/FP16/LICENSE new file mode 100644 index 000000000..eabec6c86 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/LICENSE @@ -0,0 +1,11 @@ +The MIT License (MIT) + +Copyright (c) 2017 Facebook Inc. +Copyright (c) 2017 Georgia Institute of Technology +Copyright 2019 Google LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/README.md b/cpp/external/katagocoreml/vendor/deps/FP16/README.md new file mode 100644 index 000000000..6cba15862 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/README.md @@ -0,0 +1,20 @@ +# FP16 +Header-only library for conversion to/from half-precision floating point formats + +## Features + +- Supports IEEE and ARM alternative half-precision floating-point format + - Properly converts infinities and NaNs + - Properly converts denormal numbers, even on systems without denormal support +- Header-only library, no installation or build required +- Compatible with C99 and C++11 +- Fully covered with unit tests and microbenchmarks + +## Acknowledgements + +[![HPC Garage logo](https://github.com/Maratyszcza/PeachPy/blob/master/logo/hpcgarage.png)](http://hpcgarage.org) +[![Georgia Tech College of Computing logo](https://github.com/Maratyszcza/PeachPy/blob/master/logo/college-of-computing.gif)](http://www.cse.gatech.edu/) + +The library is developed by [Marat Dukhan](http://www.maratdukhan.com) of Georgia Tech. FP16 is a research project at [Richard Vuduc](http://vuduc.org)'s HPC Garage lab in the Georgia Institute of Technology, College of Computing, School of Computational Science and Engineering. + +This material is based upon work supported by the U.S. National Science Foundation (NSF) Award Number 1339745. Any opinions, findings and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect those of NSF. 
diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16.h b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16.h new file mode 100644 index 000000000..9d7366e99 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16.h @@ -0,0 +1,11 @@ +#pragma once +#ifndef FP16_H +#define FP16_H + +#include + +#if defined(PSIMD_H) +#include +#endif + +#endif /* FP16_H */ diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/bitcasts.h b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/bitcasts.h new file mode 100644 index 000000000..86a4e22c4 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/bitcasts.h @@ -0,0 +1,92 @@ +#pragma once +#ifndef FP16_BITCASTS_H +#define FP16_BITCASTS_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include +#elif !defined(__OPENCL_VERSION__) + #include +#endif + +#if defined(__INTEL_COMPILER) + #include +#endif + +#if defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) + #include +#endif + + +static inline float fp32_from_bits(uint32_t w) { +#if defined(__OPENCL_VERSION__) + return as_float(w); +#elif defined(__CUDA_ARCH__) + return __uint_as_float((unsigned int) w); +#elif defined(__INTEL_COMPILER) + return _castu32_f32(w); +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) + return _CopyFloatFromInt32((__int32) w); +#else + union { + uint32_t as_bits; + float as_value; + } fp32 = { w }; + return fp32.as_value; +#endif +} + +static inline uint32_t fp32_to_bits(float f) { +#if defined(__OPENCL_VERSION__) + return as_uint(f); +#elif defined(__CUDA_ARCH__) + return (uint32_t) __float_as_uint(f); +#elif defined(__INTEL_COMPILER) + return _castf32_u32(f); +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) + return (uint32_t) _CopyInt32FromFloat(f); +#else + union { + float as_value; + uint32_t as_bits; + } fp32 = { f }; + return fp32.as_bits; +#endif +} + +static inline double fp64_from_bits(uint64_t w) { +#if 
defined(__OPENCL_VERSION__) + return as_double(w); +#elif defined(__CUDA_ARCH__) + return __longlong_as_double((long long) w); +#elif defined(__INTEL_COMPILER) + return _castu64_f64(w); +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) + return _CopyDoubleFromInt64((__int64) w); +#else + union { + uint64_t as_bits; + double as_value; + } fp64 = { w }; + return fp64.as_value; +#endif +} + +static inline uint64_t fp64_to_bits(double f) { +#if defined(__OPENCL_VERSION__) + return as_ulong(f); +#elif defined(__CUDA_ARCH__) + return (uint64_t) __double_as_longlong(f); +#elif defined(__INTEL_COMPILER) + return _castf64_u64(f); +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) + return (uint64_t) _CopyInt64FromDouble(f); +#else + union { + double as_value; + uint64_t as_bits; + } fp64 = { f }; + return fp64.as_bits; +#endif +} + +#endif /* FP16_BITCASTS_H */ diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/fp16.h b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/fp16.h new file mode 100644 index 000000000..b95aa15f5 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/fp16.h @@ -0,0 +1,451 @@ +#pragma once +#ifndef FP16_FP16_H +#define FP16_FP16_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include +#endif + +#ifdef _MSC_VER + #include +#endif + +#include + + +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit representation, to + * a 32-bit floating-point number in IEEE single-precision format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. 
+ */ +static inline uint32_t fp16_ieee_to_fp32_bits(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 - zero bits. + */ + const uint32_t w = (uint32_t) h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the bits 0-30 of the 32-bit word: + * + * +---+-----+------------+-------------------+ + * | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 30 27-31 17-26 0-16 + */ + const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF); + /* + * Renorm shift is the number of bits to shift mantissa left to make the half-precision number normalized. + * If the initial number is normalized, some of its high 6 bits (sign == 0 and 5-bit exponent) equals one. + * In this case renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note that if we shift + * denormalized nonsign by renorm_shift, the unit bit of mantissa will shift into exponent, turning the + * biased exponent into 1, and making mantissa normalized (i.e. without leading 1). + */ +#ifdef _MSC_VER + unsigned long nonsign_bsr; + _BitScanReverse(&nonsign_bsr, (unsigned long) nonsign); + uint32_t renorm_shift = (uint32_t) nonsign_bsr ^ 31; +#else + uint32_t renorm_shift = __builtin_clz(nonsign); +#endif + renorm_shift = renorm_shift > 5 ? 
renorm_shift - 5 : 0; + /* + * Iff half-precision number has exponent of 15, the addition overflows it into bit 31, + * and the subsequent shift turns the high 9 bits into 1. Thus + * inf_nan_mask == + * 0x7F800000 if the half-precision number had exponent of 15 (i.e. was NaN or infinity) + * 0x00000000 otherwise + */ + const int32_t inf_nan_mask = ((int32_t) (nonsign + 0x04000000) >> 8) & INT32_C(0x7F800000); + /* + * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31 into 1. Otherwise, bit 31 remains 0. + * The signed shift right by 31 broadcasts bit 31 into all bits of the zero_mask. Thus + * zero_mask == + * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h) + * 0x00000000 otherwise + */ + const int32_t zero_mask = (int32_t) (nonsign - 1) >> 31; + /* + * 1. Shift nonsign left by renorm_shift to normalize it (if the input was denormal) + * 2. Shift nonsign right by 3 so the exponent (5 bits originally) becomes an 8-bit field and 10-bit mantissa + * shifts into the 10 high bits of the 23-bit mantissa of IEEE single-precision number. + * 3. Add 0x70 to the exponent (starting at bit 23) to compensate the different in exponent bias + * (0x7F for single-precision number less 0xF for half-precision number). + * 4. Subtract renorm_shift from the exponent (starting at bit 23) to account for renormalization. As renorm_shift + * is less than 0x70, this can be combined with step 3. + * 5. Binary OR with inf_nan_mask to turn the exponent into 0xFF if the input was NaN or infinity. + * 6. Binary ANDNOT with zero_mask to turn the mantissa and exponent into zero if the input was zero. + * 7. Combine with the sign of the input number. + */ + return sign | ((((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) | inf_nan_mask) & ~zero_mask); +} + +/* + * Convert a 16-bit floating-point number in IEEE half-precision format, in bit representation, to + * a 32-bit floating-point number in IEEE single-precision format. 
+ * + * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals) + * floating-point operations and bitcasts between integer and floating-point variables. + */ +static inline float fp16_ieee_to_fp32_value(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 - zero bits. + */ + const uint32_t w = (uint32_t) h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the high bits of the 32-bit word: + * + * +-----+------------+---------------------+ + * |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000| + * +-----+------------+---------------------+ + * Bits 27-31 17-26 0-16 + */ + const uint32_t two_w = w + w; + + /* + * Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become mantissa and exponent + * of a single-precision floating-point number: + * + * S|Exponent | Mantissa + * +-+---+-----+------------+----------------+ + * |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000| + * +-+---+-----+------------+----------------+ + * Bits | 23-31 | 0-22 + * + * Next, there are some adjustments to the exponent: + * - The exponent needs to be corrected by the difference in exponent bias between single-precision and half-precision + * formats (0x7F - 0xF = 0x70) + * - Inf and NaN values in the inputs should become Inf and NaN values after conversion to the single-precision number. 
+ * Therefore, if the biased exponent of the half-precision input was 0x1F (max possible value), the biased exponent + * of the single-precision output must be 0xFF (max possible value). We do this correction in two steps: + * - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset below) rather than by 0x70 suggested + * by the difference in the exponent bias (see above). + * - Then we multiply the single-precision result of exponent adjustment by 2**(-112) to reverse the effect of + * exponent adjustment by 0xE0 less the necessary exponent adjustment by 0x70 due to difference in exponent bias. + * The floating-point multiplication hardware would ensure than Inf and NaN would retain their value on at least + * partially IEEE754-compliant implementations. + * + * Note that the above operations do not handle denormal inputs (where biased exponent == 0). However, they also do not + * operate on denormal inputs, and do not produce denormal results. + */ + const uint32_t exp_offset = UINT32_C(0xE0) << 23; +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float exp_scale = 0x1.0p-112f; +#else + const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); +#endif + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; + + /* + * Convert denormalized half-precision inputs into single-precision results (always normalized). + * Zero inputs are also handled here. + * + * In a denormalized number the biased exponent is zero, and mantissa has on-zero bits. + * First, we shift mantissa into bits 0-9 of the 32-bit word. + * + * zeros | mantissa + * +---------------------------+------------+ + * |0000 0000 0000 0000 0000 00|MM MMMM MMMM| + * +---------------------------+------------+ + * Bits 10-31 0-9 + * + * Now, remember that denormalized half-precision numbers are represented as: + * FP16 = mantissa * 2**(-24). 
+ * The trick is to construct a normalized single-precision number with the same mantissa and the half-precision input + * and with an exponent which would scale the corresponding mantissa bits to 2**(-24). + * A normalized single-precision floating-point number is represented as: + * FP32 = (1 + mantissa * 2**(-23)) * 2**(exponent - 127) + * Therefore, when the biased exponent is 126, a unit change in the mantissa of the input denormalized half-precision + * number causes a change of the constructed single-precision number by 2**(-24), i.e. the same amount. + * + * The last step is to adjust the bias of the constructed single-precision number. When the input half-precision number + * is zero, the constructed single-precision number has the value of + * FP32 = 1 * 2**(126 - 127) = 2**(-1) = 0.5 + * Therefore, we need to subtract 0.5 from the constructed single-precision number to get the numerical equivalent of + * the input half-precision number. + */ + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + /* + * - Choose either results of conversion of input as a normalized number, or as a denormalized number, depending on the + * input exponent. The variable two_w contains input exponent in bits 27-31, therefore if it's smaller than 2**27, the + * input is either a denormal number, or zero. + * - Combine the result of conversion of exponent and mantissa with the sign of the input number. + */ + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a 16-bit floating-point number in + * IEEE half-precision format, in bit representation. 
+ * + * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals) + * floating-point operations and bitcasts between integer and floating-point variables. + */ +static inline uint16_t fp16_ieee_from_fp32_value(float f) { +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const float scale_to_inf = 0x1.0p+112f; + const float scale_to_zero = 0x1.0p-110f; +#else + const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); + const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); +#endif + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); + } + + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); +} + +/* + * Convert a 16-bit floating-point number in ARM alternative half-precision format, in bit representation, to + * a 32-bit floating-point number in IEEE single-precision format, in bit representation. + * + * @note The implementation doesn't use any floating-point operations. 
+ */ +static inline uint32_t fp16_alt_to_fp32_bits(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 - zero bits. + */ + const uint32_t w = (uint32_t) h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the bits 0-30 of the 32-bit word: + * + * +---+-----+------------+-------------------+ + * | 0 |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 30 27-31 17-26 0-16 + */ + const uint32_t nonsign = w & UINT32_C(0x7FFFFFFF); + /* + * Renorm shift is the number of bits to shift mantissa left to make the half-precision number normalized. + * If the initial number is normalized, some of its high 6 bits (sign == 0 and 5-bit exponent) equals one. + * In this case renorm_shift == 0. If the number is denormalize, renorm_shift > 0. Note that if we shift + * denormalized nonsign by renorm_shift, the unit bit of mantissa will shift into exponent, turning the + * biased exponent into 1, and making mantissa normalized (i.e. without leading 1). + */ +#ifdef _MSC_VER + unsigned long nonsign_bsr; + _BitScanReverse(&nonsign_bsr, (unsigned long) nonsign); + uint32_t renorm_shift = (uint32_t) nonsign_bsr ^ 31; +#else + uint32_t renorm_shift = __builtin_clz(nonsign); +#endif + renorm_shift = renorm_shift > 5 ? 
renorm_shift - 5 : 0; + /* + * Iff nonsign is 0, it overflows into 0xFFFFFFFF, turning bit 31 into 1. Otherwise, bit 31 remains 0. + * The signed shift right by 31 broadcasts bit 31 into all bits of the zero_mask. Thus + * zero_mask == + * 0xFFFFFFFF if the half-precision number was zero (+0.0h or -0.0h) + * 0x00000000 otherwise + */ + const int32_t zero_mask = (int32_t) (nonsign - 1) >> 31; + /* + * 1. Shift nonsign left by renorm_shift to normalize it (if the input was denormal) + * 2. Shift nonsign right by 3 so the exponent (5 bits originally) becomes an 8-bit field and 10-bit mantissa + * shifts into the 10 high bits of the 23-bit mantissa of IEEE single-precision number. + * 3. Add 0x70 to the exponent (starting at bit 23) to compensate the different in exponent bias + * (0x7F for single-precision number less 0xF for half-precision number). + * 4. Subtract renorm_shift from the exponent (starting at bit 23) to account for renormalization. As renorm_shift + * is less than 0x70, this can be combined with step 3. + * 5. Binary ANDNOT with zero_mask to turn the mantissa and exponent into zero if the input was zero. + * 6. Combine with the sign of the input number. + */ + return sign | (((nonsign << renorm_shift >> 3) + ((0x70 - renorm_shift) << 23)) & ~zero_mask); +} + +/* + * Convert a 16-bit floating-point number in ARM alternative half-precision format, in bit representation, to + * a 32-bit floating-point number in IEEE single-precision format. + * + * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals) + * floating-point operations and bitcasts between integer and floating-point variables. 
+ */ +static inline float fp16_alt_to_fp32_value(uint16_t h) { + /* + * Extend the half-precision floating-point number to 32 bits and shift to the upper part of the 32-bit word: + * +---+-----+------------+-------------------+ + * | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000| + * +---+-----+------------+-------------------+ + * Bits 31 26-30 16-25 0-15 + * + * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 - zero bits. + */ + const uint32_t w = (uint32_t) h << 16; + /* + * Extract the sign of the input number into the high bit of the 32-bit word: + * + * +---+----------------------------------+ + * | S |0000000 00000000 00000000 00000000| + * +---+----------------------------------+ + * Bits 31 0-31 + */ + const uint32_t sign = w & UINT32_C(0x80000000); + /* + * Extract mantissa and biased exponent of the input number into the high bits of the 32-bit word: + * + * +-----+------------+---------------------+ + * |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000| + * +-----+------------+---------------------+ + * Bits 27-31 17-26 0-16 + */ + const uint32_t two_w = w + w; + + /* + * Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become mantissa and exponent + * of a single-precision floating-point number: + * + * S|Exponent | Mantissa + * +-+---+-----+------------+----------------+ + * |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000| + * +-+---+-----+------------+----------------+ + * Bits | 23-31 | 0-22 + * + * Next, the exponent is adjusted for the difference in exponent bias between single-precision and half-precision + * formats (0x7F - 0xF = 0x70). This operation never overflows or generates non-finite values, as the largest + * half-precision exponent is 0x1F and after the adjustment is can not exceed 0x8F < 0xFE (largest single-precision + * exponent for non-finite values). + * + * Note that this operation does not handle denormal inputs (where biased exponent == 0). 
However, they also do not + * operate on denormal inputs, and do not produce denormal results. + */ + const uint32_t exp_offset = UINT32_C(0x70) << 23; + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset); + + /* + * Convert denormalized half-precision inputs into single-precision results (always normalized). + * Zero inputs are also handled here. + * + * In a denormalized number the biased exponent is zero, and mantissa has on-zero bits. + * First, we shift mantissa into bits 0-9 of the 32-bit word. + * + * zeros | mantissa + * +---------------------------+------------+ + * |0000 0000 0000 0000 0000 00|MM MMMM MMMM| + * +---------------------------+------------+ + * Bits 10-31 0-9 + * + * Now, remember that denormalized half-precision numbers are represented as: + * FP16 = mantissa * 2**(-24). + * The trick is to construct a normalized single-precision number with the same mantissa and thehalf-precision input + * and with an exponent which would scale the corresponding mantissa bits to 2**(-24). + * A normalized single-precision floating-point number is represented as: + * FP32 = (1 + mantissa * 2**(-23)) * 2**(exponent - 127) + * Therefore, when the biased exponent is 126, a unit change in the mantissa of the input denormalized half-precision + * number causes a change of the constructud single-precision number by 2**(-24), i.e. the same ammount. + * + * The last step is to adjust the bias of the constructed single-precision number. When the input half-precision number + * is zero, the constructed single-precision number has the value of + * FP32 = 1 * 2**(126 - 127) = 2**(-1) = 0.5 + * Therefore, we need to subtract 0.5 from the constructed single-precision number to get the numerical equivalent of + * the input half-precision number. 
+ */ + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + /* + * - Choose either results of conversion of input as a normalized number, or as a denormalized number, depending on the + * input exponent. The variable two_w contains input exponent in bits 27-31, therefore if its smaller than 2**27, the + * input is either a denormal number, or zero. + * - Combine the result of conversion of exponent and mantissa with the sign of the input number. + */ + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +/* + * Convert a 32-bit floating-point number in IEEE single-precision format to a 16-bit floating-point number in + * ARM alternative half-precision format, in bit representation. + * + * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals) + * floating-point operations and bitcasts between integer and floating-point variables. + */ +static inline uint16_t fp16_alt_from_fp32_value(float f) { + const uint32_t w = fp32_to_bits(f); + const uint32_t sign = w & UINT32_C(0x80000000); + const uint32_t shl1_w = w + w; + + const uint32_t shl1_max_fp16_fp32 = UINT32_C(0x8FFFC000); + const uint32_t shl1_base = shl1_w > shl1_max_fp16_fp32 ? 
shl1_max_fp16_fp32 : shl1_w; + uint32_t shl1_bias = shl1_base & UINT32_C(0xFF000000); + const uint32_t exp_difference = 23 - 10; + const uint32_t shl1_bias_min = (127 - 1 - exp_difference) << 24; + if (shl1_bias < shl1_bias_min) { + shl1_bias = shl1_bias_min; + } + + const float bias = fp32_from_bits((shl1_bias >> 1) + ((exp_difference + 2) << 23)); + const float base = fp32_from_bits((shl1_base >> 1) + (2 << 23)) + bias; + + const uint32_t exp_f = fp32_to_bits(base) >> 13; + return (sign >> 16) | ((exp_f & UINT32_C(0x00007C00)) + (fp32_to_bits(base) & UINT32_C(0x00000FFF))); +} + +#endif /* FP16_FP16_H */ diff --git a/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/psimd.h b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/psimd.h new file mode 100644 index 000000000..428ab0651 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/FP16/include/fp16/psimd.h @@ -0,0 +1,131 @@ +#pragma once +#ifndef FP16_PSIMD_H +#define FP16_PSIMD_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include +#elif !defined(__OPENCL_VERSION__) + #include +#endif + +#include + + +PSIMD_INTRINSIC psimd_f32 fp16_ieee_to_fp32_psimd(psimd_u16 half) { + const psimd_u32 word = (psimd_u32) psimd_interleave_lo_u16(psimd_zero_u16(), half); + + const psimd_u32 sign = word & psimd_splat_u32(UINT32_C(0x80000000)); + const psimd_u32 shr3_nonsign = (word + word) >> psimd_splat_u32(4); + + const psimd_u32 exp_offset = psimd_splat_u32(UINT32_C(0x70000000)); +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const psimd_f32 exp_scale = psimd_splat_f32(0x1.0p-112f); +#else + const psimd_f32 exp_scale = psimd_splat_f32(fp32_from_bits(UINT32_C(0x7800000))); +#endif + const psimd_f32 norm_nonsign = psimd_mul_f32((psimd_f32) (shr3_nonsign + exp_offset), exp_scale); + + const psimd_u16 magic_mask = psimd_splat_u16(UINT16_C(0x3E80)); + const psimd_f32 magic_bias = psimd_splat_f32(0.25f); + const psimd_f32 
denorm_nonsign = psimd_sub_f32((psimd_f32) psimd_interleave_lo_u16(half + half, magic_mask), magic_bias); + + const psimd_s32 denorm_cutoff = psimd_splat_s32(INT32_C(0x00800000)); + const psimd_s32 denorm_mask = (psimd_s32) shr3_nonsign < denorm_cutoff; + return (psimd_f32) (sign | (psimd_s32) psimd_blend_f32(denorm_mask, denorm_nonsign, norm_nonsign)); +} + +PSIMD_INTRINSIC psimd_f32x2 fp16_ieee_to_fp32x2_psimd(psimd_u16 half) { + const psimd_u32 word_lo = (psimd_u32) psimd_interleave_lo_u16(psimd_zero_u16(), half); + const psimd_u32 word_hi = (psimd_u32) psimd_interleave_hi_u16(psimd_zero_u16(), half); + + const psimd_u32 sign_mask = psimd_splat_u32(UINT32_C(0x80000000)); + const psimd_u32 sign_lo = word_lo & sign_mask; + const psimd_u32 sign_hi = word_hi & sign_mask; + const psimd_u32 shr3_nonsign_lo = (word_lo + word_lo) >> psimd_splat_u32(4); + const psimd_u32 shr3_nonsign_hi = (word_hi + word_hi) >> psimd_splat_u32(4); + + const psimd_u32 exp_offset = psimd_splat_u32(UINT32_C(0x70000000)); +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const psimd_f32 exp_scale = psimd_splat_f32(0x1.0p-112f); +#else + const psimd_f32 exp_scale = psimd_splat_f32(fp32_from_bits(UINT32_C(0x7800000))); +#endif + const psimd_f32 norm_nonsign_lo = psimd_mul_f32((psimd_f32) (shr3_nonsign_lo + exp_offset), exp_scale); + const psimd_f32 norm_nonsign_hi = psimd_mul_f32((psimd_f32) (shr3_nonsign_hi + exp_offset), exp_scale); + + const psimd_u16 magic_mask = psimd_splat_u16(UINT16_C(0x3E80)); + const psimd_u16 shl1_half = half + half; + const psimd_f32 magic_bias = psimd_splat_f32(0.25f); + const psimd_f32 denorm_nonsign_lo = psimd_sub_f32((psimd_f32) psimd_interleave_lo_u16(shl1_half, magic_mask), magic_bias); + const psimd_f32 denorm_nonsign_hi = psimd_sub_f32((psimd_f32) psimd_interleave_hi_u16(shl1_half, magic_mask), magic_bias); + + const psimd_s32 denorm_cutoff = psimd_splat_s32(INT32_C(0x00800000)); + const 
psimd_s32 denorm_mask_lo = (psimd_s32) shr3_nonsign_lo < denorm_cutoff; + const psimd_s32 denorm_mask_hi = (psimd_s32) shr3_nonsign_hi < denorm_cutoff; + + psimd_f32x2 result; + result.lo = (psimd_f32) (sign_lo | (psimd_s32) psimd_blend_f32(denorm_mask_lo, denorm_nonsign_lo, norm_nonsign_lo)); + result.hi = (psimd_f32) (sign_hi | (psimd_s32) psimd_blend_f32(denorm_mask_hi, denorm_nonsign_hi, norm_nonsign_hi)); + return result; +} + +PSIMD_INTRINSIC psimd_f32 fp16_alt_to_fp32_psimd(psimd_u16 half) { + const psimd_u32 word = (psimd_u32) psimd_interleave_lo_u16(psimd_zero_u16(), half); + + const psimd_u32 sign = word & psimd_splat_u32(INT32_C(0x80000000)); + const psimd_u32 shr3_nonsign = (word + word) >> psimd_splat_u32(4); + +#if 0 + const psimd_s32 exp112_offset = psimd_splat_s32(INT32_C(0x38000000)); + const psimd_s32 nonsign_bits = (psimd_s32) shr3_nonsign + exp112_offset; + const psimd_s32 exp1_offset = psimd_splat_s32(INT32_C(0x00800000)); + const psimd_f32 two_nonsign = (psimd_f32) (nonsign_bits + exp1_offset); + const psimd_s32 exp113_offset = exp112_offset | exp1_offset; + return (psimd_f32) (sign | (psimd_s32) psimd_sub_f32(two_nonsign, (psimd_f32) psimd_max_s32(nonsign_bits, exp113_offset))); +#else + const psimd_u32 exp_offset = psimd_splat_u32(UINT32_C(0x38000000)); + const psimd_f32 nonsign = (psimd_f32) (shr3_nonsign + exp_offset); +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) + const psimd_f32 denorm_bias = psimd_splat_f32(0x1.0p-14f); +#else + const psimd_f32 denorm_bias = psimd_splat_f32(fp32_from_bits(UINT32_C(0x38800000))); +#endif + return (psimd_f32) (sign | (psimd_s32) psimd_sub_f32(psimd_add_f32(nonsign, nonsign), psimd_max_f32(nonsign, denorm_bias))); +#endif +} + +PSIMD_INTRINSIC psimd_f32x2 fp16_alt_to_fp32x2_psimd(psimd_u16 half) { + const psimd_u32 word_lo = (psimd_u32) psimd_interleave_lo_u16(psimd_zero_u16(), half); + const psimd_u32 word_hi = (psimd_u32) 
psimd_interleave_hi_u16(psimd_zero_u16(), half); + + const psimd_u32 sign_mask = psimd_splat_u32(UINT32_C(0x80000000)); + const psimd_u32 sign_lo = word_lo & sign_mask; + const psimd_u32 sign_hi = word_hi & sign_mask; + const psimd_u32 shr3_nonsign_lo = (word_lo + word_lo) >> psimd_splat_u32(4); + const psimd_u32 shr3_nonsign_hi = (word_hi + word_hi) >> psimd_splat_u32(4); + +#if 1 + const psimd_s32 exp112_offset = psimd_splat_s32(INT32_C(0x38000000)); + const psimd_s32 nonsign_bits_lo = (psimd_s32) shr3_nonsign_lo + exp112_offset; + const psimd_s32 nonsign_bits_hi = (psimd_s32) shr3_nonsign_hi + exp112_offset; + const psimd_s32 exp1_offset = psimd_splat_s32(INT32_C(0x00800000)); + const psimd_f32 two_nonsign_lo = (psimd_f32) (nonsign_bits_lo + exp1_offset); + const psimd_f32 two_nonsign_hi = (psimd_f32) (nonsign_bits_hi + exp1_offset); + const psimd_s32 exp113_offset = exp1_offset | exp112_offset; + psimd_f32x2 result; + result.lo = (psimd_f32) (sign_lo | (psimd_s32) psimd_sub_f32(two_nonsign_lo, (psimd_f32) psimd_max_s32(nonsign_bits_lo, exp113_offset))); + result.hi = (psimd_f32) (sign_hi | (psimd_s32) psimd_sub_f32(two_nonsign_hi, (psimd_f32) psimd_max_s32(nonsign_bits_hi, exp113_offset))); + return result; +#else + const psimd_u32 exp_offset = psimd_splat_u32(UINT32_C(0x38000000)); + const psimd_f32 nonsign_lo = (psimd_f32) (shr3_nonsign_lo + exp_offset); + const psimd_f32 nonsign_hi = (psimd_f32) (shr3_nonsign_hi + exp_offset); + const psimd_f32 denorm_bias = psimd_splat_f32(0x1.0p-14f); + psimd_f32x2 result; + result.lo = (psimd_f32) (sign_lo | (psimd_s32) psimd_sub_f32(psimd_add_f32(nonsign_lo, nonsign_lo), psimd_max_f32(nonsign_lo, denorm_bias))); + result.hi = (psimd_f32) (sign_hi | (psimd_s32) psimd_sub_f32(psimd_add_f32(nonsign_hi, nonsign_hi), psimd_max_f32(nonsign_hi, denorm_bias))); + return result; +#endif +} + +#endif /* FP16_PSIMD_H */ diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md 
b/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..770b8173e --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at mail@nlohmann.me. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT b/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT new file mode 100644 index 000000000..f0622d6dc --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013-2021 Niels Lohmann + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md b/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md new file mode 100644 index 000000000..5d354f1ed --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md @@ -0,0 +1,1643 @@ +[![JSON for Modern C++](https://raw.githubusercontent.com/nlohmann/json/master/doc/json.gif)](https://github.com/nlohmann/json/releases) + +[![Build Status](https://travis-ci.org/nlohmann/json.svg?branch=master)](https://travis-ci.org/nlohmann/json) +[![Build Status](https://ci.appveyor.com/api/projects/status/1acb366xfyg3qybk/branch/develop?svg=true)](https://ci.appveyor.com/project/nlohmann/json) +[![Ubuntu](https://github.com/nlohmann/json/workflows/Ubuntu/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AUbuntu) +[![macOS](https://github.com/nlohmann/json/workflows/macOS/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AmacOS) +[![Windows](https://github.com/nlohmann/json/workflows/Windows/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AWindows) +[![Build Status](https://circleci.com/gh/nlohmann/json.svg?style=svg)](https://circleci.com/gh/nlohmann/json) +[![Coverage Status](https://coveralls.io/repos/github/nlohmann/json/badge.svg?branch=develop)](https://coveralls.io/github/nlohmann/json?branch=develop) +[![Coverity Scan Build Status](https://scan.coverity.com/projects/5550/badge.svg)](https://scan.coverity.com/projects/nlohmann-json) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/f3732b3327e34358a0e9d1fe9f661f08)](https://www.codacy.com/app/nlohmann/json?utm_source=github.com&utm_medium=referral&utm_content=nlohmann/json&utm_campaign=Badge_Grade) +[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/nlohmann/json.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/nlohmann/json/context:cpp) +[![Fuzzing 
Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/json.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:json) +[![Try online](https://img.shields.io/badge/try-online-blue.svg)](https://wandbox.org/permlink/3lCHrFUZANONKv7a) +[![Documentation](https://img.shields.io/badge/docs-doxygen-blue.svg)](https://nlohmann.github.io/json/doxygen/index.html) +[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/nlohmann/json/master/LICENSE.MIT) +[![GitHub Releases](https://img.shields.io/github/release/nlohmann/json.svg)](https://github.com/nlohmann/json/releases) +[![GitHub Downloads](https://img.shields.io/github/downloads/nlohmann/json/total)](https://github.com/nlohmann/json/releases) +[![GitHub Issues](https://img.shields.io/github/issues/nlohmann/json.svg)](https://github.com/nlohmann/json/issues) +[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/nlohmann/json.svg)](https://isitmaintained.com/project/nlohmann/json "Average time to resolve an issue") +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/289/badge)](https://bestpractices.coreinfrastructure.org/projects/289) +[![GitHub Sponsors](https://img.shields.io/badge/GitHub-Sponsors-ff69b4)](https://github.com/sponsors/nlohmann) + +- [Design goals](#design-goals) +- [Sponsors](#sponsors) +- [Integration](#integration) + - [CMake](#cmake) + - [Package Managers](#package-managers) + - [Pkg-config](#pkg-config) +- [Examples](#examples) + - [JSON as first-class data type](#json-as-first-class-data-type) + - [Serialization / Deserialization](#serialization--deserialization) + - [STL-like access](#stl-like-access) + - [Conversion from STL containers](#conversion-from-stl-containers) + - [JSON Pointer and JSON Patch](#json-pointer-and-json-patch) + - [JSON Merge Patch](#json-merge-patch) + - [Implicit conversions](#implicit-conversions) + - [Conversions to/from arbitrary 
types](#arbitrary-types-conversions) + - [Specializing enum conversion](#specializing-enum-conversion) + - [Binary formats (BSON, CBOR, MessagePack, and UBJSON)](#binary-formats-bson-cbor-messagepack-and-ubjson) +- [Supported compilers](#supported-compilers) +- [License](#license) +- [Contact](#contact) +- [Thanks](#thanks) +- [Used third-party tools](#used-third-party-tools) +- [Projects using JSON for Modern C++](#projects-using-json-for-modern-c) +- [Notes](#notes) +- [Execute unit tests](#execute-unit-tests) + +## Design goals + +There are myriads of [JSON](https://json.org) libraries out there, and each may even have its reason to exist. Our class had these design goals: + +- **Intuitive syntax**. In languages such as Python, JSON feels like a first class data type. We used all the operator magic of modern C++ to achieve the same feeling in your code. Check out the [examples below](#examples) and you'll know what I mean. + +- **Trivial integration**. Our whole code consists of a single header file [`json.hpp`](https://github.com/nlohmann/json/blob/develop/single_include/nlohmann/json.hpp). That's it. No library, no subproject, no dependencies, no complex build system. The class is written in vanilla C++11. All in all, everything should require no adjustment of your compiler flags or project settings. + +- **Serious testing**. Our class is heavily [unit-tested](https://github.com/nlohmann/json/tree/develop/test/src) and covers [100%](https://coveralls.io/r/nlohmann/json) of the code, including all exceptional behavior. Furthermore, we checked with [Valgrind](https://valgrind.org) and the [Clang Sanitizers](https://clang.llvm.org/docs/index.html) that there are no memory leaks. [Google OSS-Fuzz](https://github.com/google/oss-fuzz/tree/master/projects/json) additionally runs fuzz tests against all parsers 24/7, effectively executing billions of tests so far. 
To maintain high quality, the project is following the [Core Infrastructure Initiative (CII) best practices](https://bestpractices.coreinfrastructure.org/projects/289). + +Other aspects were not so important to us: + +- **Memory efficiency**. Each JSON object has an overhead of one pointer (the maximal size of a union) and one enumeration element (1 byte). The default generalization uses the following C++ data types: `std::string` for strings, `int64_t`, `uint64_t` or `double` for numbers, `std::map` for objects, `std::vector` for arrays, and `bool` for Booleans. However, you can template the generalized class `basic_json` to your needs. + +- **Speed**. There are certainly [faster JSON libraries](https://github.com/miloyip/nativejson-benchmark#parsing-time) out there. However, if your goal is to speed up your development by adding JSON support with a single header, then this library is the way to go. If you know how to use a `std::vector` or `std::map`, you are already set. + +See the [contribution guidelines](https://github.com/nlohmann/json/blob/master/.github/CONTRIBUTING.md#please-dont) for more information. + + +## Sponsors + +You can sponsor this library at [GitHub Sponsors](https://github.com/sponsors/nlohmann). + +### :label: Named Sponsors + +- [Michael Hartmann](https://github.com/reFX-Mike) +- [Stefan Hagen](https://github.com/sthagen) +- [Steve Sperandeo](https://github.com/homer6) +- [Robert Jefe Lindstädt](https://github.com/eljefedelrodeodeljefe) +- [Steve Wagner](https://github.com/ciroque) + +Thanks everyone! + + +## Integration + +[`json.hpp`](https://github.com/nlohmann/json/blob/develop/single_include/nlohmann/json.hpp) is the single required file in `single_include/nlohmann` or [released here](https://github.com/nlohmann/json/releases). 
You need to add + +```cpp +#include + +// for convenience +using json = nlohmann::json; +``` + +to the files you want to process JSON and set the necessary switches to enable C++11 (e.g., `-std=c++11` for GCC and Clang). + +You can further use file [`include/nlohmann/json_fwd.hpp`](https://github.com/nlohmann/json/blob/develop/include/nlohmann/json_fwd.hpp) for forward-declarations. The installation of json_fwd.hpp (as part of cmake's install step), can be achieved by setting `-DJSON_MultipleHeaders=ON`. + +### CMake + +You can also use the `nlohmann_json::nlohmann_json` interface target in CMake. This target populates the appropriate usage requirements for `INTERFACE_INCLUDE_DIRECTORIES` to point to the appropriate include directories and `INTERFACE_COMPILE_FEATURES` for the necessary C++11 flags. + +#### External + +To use this library from a CMake project, you can locate it directly with `find_package()` and use the namespaced imported target from the generated package configuration: + +```cmake +# CMakeLists.txt +find_package(nlohmann_json 3.2.0 REQUIRED) +... +add_library(foo ...) +... +target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) +``` + +The package configuration file, `nlohmann_jsonConfig.cmake`, can be used either from an install tree or directly out of the build tree. + +#### Embedded + +To embed the library directly into an existing CMake project, place the entire source tree in a subdirectory and call `add_subdirectory()` in your `CMakeLists.txt` file: + +```cmake +# Typically you don't care so much for a third party library's tests to be +# run from your own project's code. +set(JSON_BuildTests OFF CACHE INTERNAL "") + +# If you only include this third party in PRIVATE source files, you do not +# need to install it when your main project gets installed. +# set(JSON_Install OFF CACHE INTERNAL "") + +# Don't use include(nlohmann_json/CMakeLists.txt) since that carries with it +# unintended consequences that will break the build. 
It's generally +# discouraged (although not necessarily well documented as such) to use +# include(...) for pulling in other CMake projects anyways. +add_subdirectory(nlohmann_json) +... +add_library(foo ...) +... +target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) +``` + +##### Embedded (FetchContent) + +Since CMake v3.11, +[FetchContent](https://cmake.org/cmake/help/v3.11/module/FetchContent.html) can +be used to automatically download the repository as a dependency at configure type. + +Example: +```cmake +include(FetchContent) + +FetchContent_Declare(json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.7.3) + +FetchContent_GetProperties(json) +if(NOT json_POPULATED) + FetchContent_Populate(json) + add_subdirectory(${json_SOURCE_DIR} ${json_BINARY_DIR} EXCLUDE_FROM_ALL) +endif() + +target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) +``` + +**Note**: The repository https://github.com/nlohmann/json download size is huge. +It contains all the dataset used for the benchmarks. You might want to depend on +a smaller repository. For instance, you might want to replace the URL above by +https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent + +#### Supporting Both + +To allow your project to support either an externally supplied or an embedded JSON library, you can use a pattern akin to the following: + +``` cmake +# Top level CMakeLists.txt +project(FOO) +... +option(FOO_USE_EXTERNAL_JSON "Use an external JSON library" OFF) +... +add_subdirectory(thirdparty) +... +add_library(foo ...) +... +# Note that the namespaced target will always be available regardless of the +# import method +target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) +``` +```cmake +# thirdparty/CMakeLists.txt +... +if(FOO_USE_EXTERNAL_JSON) + find_package(nlohmann_json 3.2.0 REQUIRED) +else() + set(JSON_BuildTests OFF CACHE INTERNAL "") + add_subdirectory(nlohmann_json) +endif() +... 
+``` + +`thirdparty/nlohmann_json` is then a complete copy of this source tree. + +### Package Managers + +:beer: If you are using OS X and [Homebrew](https://brew.sh), just type `brew tap nlohmann/json` and `brew install nlohmann-json` and you're set. If you want the bleeding edge rather than the latest release, use `brew install nlohmann-json --HEAD`. + +If you are using the [Meson Build System](https://mesonbuild.com), add this source tree as a [meson subproject](https://mesonbuild.com/Subprojects.html#using-a-subproject). You may also use the `include.zip` published in this project's [Releases](https://github.com/nlohmann/json/releases) to reduce the size of the vendored source tree. Alternatively, you can get a wrap file by downloading it from [Meson WrapDB](https://wrapdb.mesonbuild.com/nlohmann_json), or simply use `meson wrap install nlohmann_json`. Please see the meson project for any issues regarding the packaging. + +The provided meson.build can also be used as an alternative to cmake for installing `nlohmann_json` system-wide in which case a pkg-config file is installed. To use it, simply have your build system require the `nlohmann_json` pkg-config dependency. In Meson, it is preferred to use the [`dependency()`](https://mesonbuild.com/Reference-manual.html#dependency) object with a subproject fallback, rather than using the subproject directly. + +If you are using [Conan](https://www.conan.io/) to manage your dependencies, merely add `nlohmann_json/x.y.z` to your `conanfile`'s requires, where `x.y.z` is the release version you want to use. Please file issues [here](https://github.com/conan-io/conan-center-index/issues) if you experience problems with the packages. + +If you are using [Spack](https://www.spack.io/) to manage your dependencies, you can use the [`nlohmann-json` package](https://spack.readthedocs.io/en/latest/package_list.html#nlohmann-json). 
Please see the [spack project](https://github.com/spack/spack) for any issues regarding the packaging. + +If you are using [hunter](https://github.com/cpp-pm/hunter) on your project for external dependencies, then you can use the [nlohmann_json package](https://hunter.readthedocs.io/en/latest/packages/pkg/nlohmann_json.html). Please see the hunter project for any issues regarding the packaging. + +If you are using [Buckaroo](https://buckaroo.pm), you can install this library's module with `buckaroo add github.com/buckaroo-pm/nlohmann-json`. Please file issues [here](https://github.com/buckaroo-pm/nlohmann-json). There is a demo repo [here](https://github.com/njlr/buckaroo-nholmann-json-example). + +If you are using [vcpkg](https://github.com/Microsoft/vcpkg/) on your project for external dependencies, then you can use the [nlohmann-json package](https://github.com/Microsoft/vcpkg/tree/master/ports/nlohmann-json). Please see the vcpkg project for any issues regarding the packaging. + +If you are using [cget](https://cget.readthedocs.io/en/latest/), you can install the latest development version with `cget install nlohmann/json`. A specific version can be installed with `cget install nlohmann/json@v3.1.0`. Also, the multiple header version can be installed by adding the `-DJSON_MultipleHeaders=ON` flag (i.e., `cget install nlohmann/json -DJSON_MultipleHeaders=ON`). + +If you are using [CocoaPods](https://cocoapods.org), you can use the library by adding pod `"nlohmann_json", '~>3.1.2'` to your podfile (see [an example](https://bitbucket.org/benman/nlohmann_json-cocoapod/src/master/)). Please file issues [here](https://bitbucket.org/benman/nlohmann_json-cocoapod/issues?status=new&status=open). + +If you are using [NuGet](https://www.nuget.org), you can use the package [nlohmann.json](https://www.nuget.org/packages/nlohmann.json/). Please check [this extensive description](https://github.com/nlohmann/json/issues/1132#issuecomment-452250255) on how to use the package. 
Please file issues [here](https://github.com/hnkb/nlohmann-json-nuget/issues).
+
+If you are using [conda](https://conda.io/), you can use the package [nlohmann_json](https://github.com/conda-forge/nlohmann_json-feedstock) from [conda-forge](https://conda-forge.org) executing `conda install -c conda-forge nlohmann_json`. Please file issues [here](https://github.com/conda-forge/nlohmann_json-feedstock/issues).
+
+If you are using [MSYS2](https://www.msys2.org/), you can use the [mingw-w64-nlohmann-json](https://packages.msys2.org/base/mingw-w64-nlohmann-json) package, just type `pacman -S mingw-w64-i686-nlohmann-json` or `pacman -S mingw-w64-x86_64-nlohmann-json` for installation. Please file issues [here](https://github.com/msys2/MINGW-packages/issues/new?title=%5Bnlohmann-json%5D) if you experience problems with the packages.
+
+If you are using [`build2`](https://build2.org), you can use the [`nlohmann-json`](https://cppget.org/nlohmann-json) package from the public repository https://cppget.org or directly from the [package's sources repository](https://github.com/build2-packaging/nlohmann-json). In your project's `manifest` file, just add `depends: nlohmann-json` (probably with some [version constraints](https://build2.org/build2-toolchain/doc/build2-toolchain-intro.xhtml#guide-add-remove-deps)). If you are not familiar with using dependencies in `build2`, [please read this introduction](https://build2.org/build2-toolchain/doc/build2-toolchain-intro.xhtml).
+Please file issues [here](https://github.com/build2-packaging/nlohmann-json) if you experience problems with the packages.
+
+If you are using [`wsjcpp`](https://wsjcpp.org), you can use the command `wsjcpp install "https://github.com/nlohmann/json:develop"` to get the latest version. Note you can change the branch ":develop" to an existing tag or another branch. 
+ +If you are using [`CPM.cmake`](https://github.com/TheLartians/CPM.cmake), you can check this [`example`](https://github.com/TheLartians/CPM.cmake/tree/master/examples/json). After [adding CPM script](https://github.com/TheLartians/CPM.cmake#adding-cpm) to your project, implement the following snippet to your CMake: + +```cmake +CPMAddPackage( + NAME nlohmann_json + GITHUB_REPOSITORY nlohmann/json + VERSION 3.9.1) +``` + +### Pkg-config + +If you are using bare Makefiles, you can use `pkg-config` to generate the include flags that point to where the library is installed: + +```sh +pkg-config nlohmann_json --cflags +``` + +Users of the Meson build system will also be able to use a system wide library, which will be found by `pkg-config`: + +```meson +json = dependency('nlohmann_json', required: true) +``` + +## Examples + +Beside the examples below, you may want to check the [documentation](https://nlohmann.github.io/json/) where each function contains a separate code example (e.g., check out [`emplace()`](https://nlohmann.github.io/json/api/basic_json/emplace/)). All [example files](https://github.com/nlohmann/json/tree/develop/doc/examples) can be compiled and executed on their own (e.g., file [emplace.cpp](https://github.com/nlohmann/json/blob/develop/doc/examples/emplace.cpp)). + +### JSON as first-class data type + +Here are some examples to give you an idea how to use the class. 
+ +Assume you want to create the JSON object + +```json +{ + "pi": 3.141, + "happy": true, + "name": "Niels", + "nothing": null, + "answer": { + "everything": 42 + }, + "list": [1, 0, 2], + "object": { + "currency": "USD", + "value": 42.99 + } +} +``` + +With this library, you could write: + +```cpp +// create an empty structure (null) +json j; + +// add a number that is stored as double (note the implicit conversion of j to an object) +j["pi"] = 3.141; + +// add a Boolean that is stored as bool +j["happy"] = true; + +// add a string that is stored as std::string +j["name"] = "Niels"; + +// add another null object by passing nullptr +j["nothing"] = nullptr; + +// add an object inside the object +j["answer"]["everything"] = 42; + +// add an array that is stored as std::vector (using an initializer list) +j["list"] = { 1, 0, 2 }; + +// add another object (using an initializer list of pairs) +j["object"] = { {"currency", "USD"}, {"value", 42.99} }; + +// instead, you could also write (which looks very similar to the JSON above) +json j2 = { + {"pi", 3.141}, + {"happy", true}, + {"name", "Niels"}, + {"nothing", nullptr}, + {"answer", { + {"everything", 42} + }}, + {"list", {1, 0, 2}}, + {"object", { + {"currency", "USD"}, + {"value", 42.99} + }} +}; +``` + +Note that in all these cases, you never need to "tell" the compiler which JSON value type you want to use. 
If you want to be explicit or express some edge cases, the functions [`json::array()`](https://nlohmann.github.io/json/api/basic_json/array/) and [`json::object()`](https://nlohmann.github.io/json/api/basic_json/object/) will help: + +```cpp +// a way to express the empty array [] +json empty_array_explicit = json::array(); + +// ways to express the empty object {} +json empty_object_implicit = json({}); +json empty_object_explicit = json::object(); + +// a way to express an _array_ of key/value pairs [["currency", "USD"], ["value", 42.99]] +json array_not_object = json::array({ {"currency", "USD"}, {"value", 42.99} }); +``` + +### Serialization / Deserialization + +#### To/from strings + +You can create a JSON value (deserialization) by appending `_json` to a string literal: + +```cpp +// create object from string literal +json j = "{ \"happy\": true, \"pi\": 3.141 }"_json; + +// or even nicer with a raw string literal +auto j2 = R"( + { + "happy": true, + "pi": 3.141 + } +)"_json; +``` + +Note that without appending the `_json` suffix, the passed string literal is not parsed, but just used as JSON string value. That is, `json j = "{ \"happy\": true, \"pi\": 3.141 }"` would just store the string `"{ "happy": true, "pi": 3.141 }"` rather than parsing the actual object. 
+ +The above example can also be expressed explicitly using [`json::parse()`](https://nlohmann.github.io/json/api/basic_json/parse/): + +```cpp +// parse explicitly +auto j3 = json::parse("{ \"happy\": true, \"pi\": 3.141 }"); +``` + +You can also get a string representation of a JSON value (serialize): + +```cpp +// explicit conversion to string +std::string s = j.dump(); // {"happy":true,"pi":3.141} + +// serialization with pretty printing +// pass in the amount of spaces to indent +std::cout << j.dump(4) << std::endl; +// { +// "happy": true, +// "pi": 3.141 +// } +``` + +Note the difference between serialization and assignment: + +```cpp +// store a string in a JSON value +json j_string = "this is a string"; + +// retrieve the string value +auto cpp_string = j_string.get(); +// retrieve the string value (alternative when an variable already exists) +std::string cpp_string2; +j_string.get_to(cpp_string2); + +// retrieve the serialized value (explicit JSON serialization) +std::string serialized_string = j_string.dump(); + +// output of original string +std::cout << cpp_string << " == " << cpp_string2 << " == " << j_string.get() << '\n'; +// output of serialized value +std::cout << j_string << " == " << serialized_string << std::endl; +``` + +[`.dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) returns the originally stored string value. + +Note the library only supports UTF-8. When you store strings with different encodings in the library, calling [`dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) may throw an exception unless `json::error_handler_t::replace` or `json::error_handler_t::ignore` are used as error handlers. + +#### To/from streams (e.g. 
files, string streams) + +You can also use streams to serialize and deserialize: + +```cpp +// deserialize from standard input +json j; +std::cin >> j; + +// serialize to standard output +std::cout << j; + +// the setw manipulator was overloaded to set the indentation for pretty printing +std::cout << std::setw(4) << j << std::endl; +``` + +These operators work for any subclasses of `std::istream` or `std::ostream`. Here is the same example with files: + +```cpp +// read a JSON file +std::ifstream i("file.json"); +json j; +i >> j; + +// write prettified JSON to another file +std::ofstream o("pretty.json"); +o << std::setw(4) << j << std::endl; +``` + +Please note that setting the exception bit for `failbit` is inappropriate for this use case. It will result in program termination due to the `noexcept` specifier in use. + +#### Read from iterator range + +You can also parse JSON from an iterator range; that is, from any container accessible by iterators whose `value_type` is an integral type of 1, 2 or 4 bytes, which will be interpreted as UTF-8, UTF-16 and UTF-32 respectively. For instance, a `std::vector`, or a `std::list`: + +```cpp +std::vector v = {'t', 'r', 'u', 'e'}; +json j = json::parse(v.begin(), v.end()); +``` + +You may leave the iterators for the range [begin, end): + +```cpp +std::vector v = {'t', 'r', 'u', 'e'}; +json j = json::parse(v); +``` + +#### Custom data source + +Since the parse function accepts arbitrary iterator ranges, you can provide your own data sources by implementing the `LegacyInputIterator` concept. 
+
+```cpp
+struct MyContainer {
+    void advance();
+    const char& get_current();
+};
+
+struct MyIterator {
+    using difference_type = std::ptrdiff_t;
+    using value_type = char;
+    using pointer = const char*;
+    using reference = const char&;
+    using iterator_category = std::input_iterator_tag;
+
+    MyIterator& operator++() {
+        target->advance();
+        return *this;
+    }
+
+    bool operator!=(const MyIterator& rhs) const {
+        return rhs.target != target;
+    }
+
+    reference operator*() const {
+        return target->get_current();
+    }
+
+    MyContainer* target = nullptr;
+};
+
+MyIterator begin(MyContainer& tgt) {
+    return MyIterator{&tgt};
+}
+
+MyIterator end(const MyContainer&) {
+    return {};
+}
+
+void foo() {
+    MyContainer c;
+    json j = json::parse(c);
+}
+```
+
+#### SAX interface
+
+The library uses a SAX-like interface with the following functions:
+
+```cpp
+// called when null is parsed
+bool null();
+
+// called when a boolean is parsed; value is passed
+bool boolean(bool val);
+
+// called when a signed or unsigned integer number is parsed; value is passed
+bool number_integer(number_integer_t val);
+bool number_unsigned(number_unsigned_t val);
+
+// called when a floating-point number is parsed; value and original string is passed
+bool number_float(number_float_t val, const string_t& s);
+
+// called when a string is parsed; value is passed and can be safely moved away
+bool string(string_t& val);
+// called when a binary value is parsed; value is passed and can be safely moved away
+bool binary(binary_t& val);
+
+// called when an object or array begins or ends, resp. 
The number of elements is passed (or -1 if not known) +bool start_object(std::size_t elements); +bool end_object(); +bool start_array(std::size_t elements); +bool end_array(); +// called when an object key is parsed; value is passed and can be safely moved away +bool key(string_t& val); + +// called when a parse error occurs; byte position, the last token, and an exception is passed +bool parse_error(std::size_t position, const std::string& last_token, const detail::exception& ex); +``` + +The return value of each function determines whether parsing should proceed. + +To implement your own SAX handler, proceed as follows: + +1. Implement the SAX interface in a class. You can use class `nlohmann::json_sax` as base class, but you can also use any class where the functions described above are implemented and public. +2. Create an object of your SAX interface class, e.g. `my_sax`. +3. Call `bool json::sax_parse(input, &my_sax)`; where the first parameter can be any input like a string or an input stream and the second parameter is a pointer to your SAX interface. + +Note the `sax_parse` function only returns a `bool` indicating the result of the last executed SAX event. It does not return a `json` value - it is up to you to decide what to do with the SAX events. Furthermore, no exceptions are thrown in case of a parse error - it is up to you what to do with the exception object passed to your `parse_error` implementation. Internally, the SAX interface is used for the DOM parser (class `json_sax_dom_parser`) as well as the acceptor (`json_sax_acceptor`), see file [`json_sax.hpp`](https://github.com/nlohmann/json/blob/develop/include/nlohmann/detail/input/json_sax.hpp). + +### STL-like access + +We designed the JSON class to behave just like an STL container. In fact, it satisfies the [**ReversibleContainer**](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) requirement. 
+ +```cpp +// create an array using push_back +json j; +j.push_back("foo"); +j.push_back(1); +j.push_back(true); + +// also use emplace_back +j.emplace_back(1.78); + +// iterate the array +for (json::iterator it = j.begin(); it != j.end(); ++it) { + std::cout << *it << '\n'; +} + +// range-based for +for (auto& element : j) { + std::cout << element << '\n'; +} + +// getter/setter +const auto tmp = j[0].get(); +j[1] = 42; +bool foo = j.at(2); + +// comparison +j == "[\"foo\", 42, true, 1.78]"_json; // true + +// other stuff +j.size(); // 3 entries +j.empty(); // false +j.type(); // json::value_t::array +j.clear(); // the array is empty again + +// convenience type checkers +j.is_null(); +j.is_boolean(); +j.is_number(); +j.is_object(); +j.is_array(); +j.is_string(); + +// create an object +json o; +o["foo"] = 23; +o["bar"] = false; +o["baz"] = 3.141; + +// also use emplace +o.emplace("weather", "sunny"); + +// special iterator member functions for objects +for (json::iterator it = o.begin(); it != o.end(); ++it) { + std::cout << it.key() << " : " << it.value() << "\n"; +} + +// the same code as range for +for (auto& el : o.items()) { + std::cout << el.key() << " : " << el.value() << "\n"; +} + +// even easier with structured bindings (C++17) +for (auto& [key, value] : o.items()) { + std::cout << key << " : " << value << "\n"; +} + +// find an entry +if (o.contains("foo")) { + // there is an entry with key "foo" +} + +// or via find and an iterator +if (o.find("foo") != o.end()) { + // there is an entry with key "foo" +} + +// or simpler using count() +int foo_present = o.count("foo"); // 1 +int fob_present = o.count("fob"); // 0 + +// delete an entry +o.erase("foo"); +``` + + +### Conversion from STL containers + +Any sequence container (`std::array`, `std::vector`, `std::deque`, `std::forward_list`, `std::list`) whose values can be used to construct JSON values (e.g., integers, floating point numbers, Booleans, string types, or again STL containers described in this 
section) can be used to create a JSON array. The same holds for similar associative containers (`std::set`, `std::multiset`, `std::unordered_set`, `std::unordered_multiset`), but in these cases the order of the elements of the array depends on how the elements are ordered in the respective STL container. + +```cpp +std::vector c_vector {1, 2, 3, 4}; +json j_vec(c_vector); +// [1, 2, 3, 4] + +std::deque c_deque {1.2, 2.3, 3.4, 5.6}; +json j_deque(c_deque); +// [1.2, 2.3, 3.4, 5.6] + +std::list c_list {true, true, false, true}; +json j_list(c_list); +// [true, true, false, true] + +std::forward_list c_flist {12345678909876, 23456789098765, 34567890987654, 45678909876543}; +json j_flist(c_flist); +// [12345678909876, 23456789098765, 34567890987654, 45678909876543] + +std::array c_array {{1, 2, 3, 4}}; +json j_array(c_array); +// [1, 2, 3, 4] + +std::set c_set {"one", "two", "three", "four", "one"}; +json j_set(c_set); // only one entry for "one" is used +// ["four", "one", "three", "two"] + +std::unordered_set c_uset {"one", "two", "three", "four", "one"}; +json j_uset(c_uset); // only one entry for "one" is used +// maybe ["two", "three", "four", "one"] + +std::multiset c_mset {"one", "two", "one", "four"}; +json j_mset(c_mset); // both entries for "one" are used +// maybe ["one", "two", "one", "four"] + +std::unordered_multiset c_umset {"one", "two", "one", "four"}; +json j_umset(c_umset); // both entries for "one" are used +// maybe ["one", "two", "one", "four"] +``` + +Likewise, any associative key-value containers (`std::map`, `std::multimap`, `std::unordered_map`, `std::unordered_multimap`) whose keys can construct an `std::string` and whose values can be used to construct JSON values (see examples above) can be used to create a JSON object. Note that in case of multimaps only one key is used in the JSON object and the value depends on the internal order of the STL container. 
+ +```cpp +std::map c_map { {"one", 1}, {"two", 2}, {"three", 3} }; +json j_map(c_map); +// {"one": 1, "three": 3, "two": 2 } + +std::unordered_map c_umap { {"one", 1.2}, {"two", 2.3}, {"three", 3.4} }; +json j_umap(c_umap); +// {"one": 1.2, "two": 2.3, "three": 3.4} + +std::multimap c_mmap { {"one", true}, {"two", true}, {"three", false}, {"three", true} }; +json j_mmap(c_mmap); // only one entry for key "three" is used +// maybe {"one": true, "two": true, "three": true} + +std::unordered_multimap c_ummap { {"one", true}, {"two", true}, {"three", false}, {"three", true} }; +json j_ummap(c_ummap); // only one entry for key "three" is used +// maybe {"one": true, "two": true, "three": true} +``` + +### JSON Pointer and JSON Patch + +The library supports **JSON Pointer** ([RFC 6901](https://tools.ietf.org/html/rfc6901)) as alternative means to address structured values. On top of this, **JSON Patch** ([RFC 6902](https://tools.ietf.org/html/rfc6902)) allows to describe differences between two JSON values - effectively allowing patch and diff operations known from Unix. 
+ +```cpp +// a JSON value +json j_original = R"({ + "baz": ["one", "two", "three"], + "foo": "bar" +})"_json; + +// access members with a JSON pointer (RFC 6901) +j_original["/baz/1"_json_pointer]; +// "two" + +// a JSON patch (RFC 6902) +json j_patch = R"([ + { "op": "replace", "path": "/baz", "value": "boo" }, + { "op": "add", "path": "/hello", "value": ["world"] }, + { "op": "remove", "path": "/foo"} +])"_json; + +// apply the patch +json j_result = j_original.patch(j_patch); +// { +// "baz": "boo", +// "hello": ["world"] +// } + +// calculate a JSON patch from two JSON values +json::diff(j_result, j_original); +// [ +// { "op":" replace", "path": "/baz", "value": ["one", "two", "three"] }, +// { "op": "remove","path": "/hello" }, +// { "op": "add", "path": "/foo", "value": "bar" } +// ] +``` + +### JSON Merge Patch + +The library supports **JSON Merge Patch** ([RFC 7386](https://tools.ietf.org/html/rfc7386)) as a patch format. Instead of using JSON Pointer (see above) to specify values to be manipulated, it describes the changes using a syntax that closely mimics the document being modified. + +```cpp +// a JSON value +json j_document = R"({ + "a": "b", + "c": { + "d": "e", + "f": "g" + } +})"_json; + +// a patch +json j_patch = R"({ + "a":"z", + "c": { + "f": null + } +})"_json; + +// apply the patch +j_document.merge_patch(j_patch); +// { +// "a": "z", +// "c": { +// "d": "e" +// } +// } +``` + +### Implicit conversions + +Supported types can be implicitly converted to JSON values. + +It is recommended to **NOT USE** implicit conversions **FROM** a JSON value. +You can find more details about this recommendation [here](https://www.github.com/nlohmann/json/issues/958). +You can switch off implicit conversions by defining `JSON_USE_IMPLICIT_CONVERSIONS` to `0` before including the `json.hpp` header. When using CMake, you can also achieve this by setting the option `JSON_ImplicitConversions` to `OFF`. 
+
+```cpp
+// strings
+std::string s1 = "Hello, world!";
+json js = s1;
+auto s2 = js.get<std::string>();
+// NOT RECOMMENDED
+std::string s3 = js;
+std::string s4;
+s4 = js;
+
+// Booleans
+bool b1 = true;
+json jb = b1;
+auto b2 = jb.get<bool>();
+// NOT RECOMMENDED
+bool b3 = jb;
+bool b4;
+b4 = jb;
+
+// numbers
+int i = 42;
+json jn = i;
+auto f = jn.get<double>();
+// NOT RECOMMENDED
+double f2 = jn;
+double f3;
+f3 = jn;
+
+// etc.
+```
+
+Note that `char` types are not automatically converted to JSON strings, but to integer numbers. A conversion to a string must be specified explicitly:
+
+```cpp
+char ch = 'A'; // ASCII value 65
+json j_default = ch; // stores integer number 65
+json j_string = std::string(1, ch); // stores string "A"
+```
+
+### Arbitrary types conversions
+
+Every type can be serialized in JSON, not just STL containers and scalar types. Usually, you would do something along those lines:
+
+```cpp
+namespace ns {
+    // a simple struct to model a person
+    struct person {
+        std::string name;
+        std::string address;
+        int age;
+    };
+}
+
+ns::person p = {"Ned Flanders", "744 Evergreen Terrace", 60};
+
+// convert to JSON: copy each value into the JSON object
+json j;
+j["name"] = p.name;
+j["address"] = p.address;
+j["age"] = p.age;
+
+// ...
+
+// convert from JSON: copy each value from the JSON object
+ns::person p {
+    j["name"].get<std::string>(),
+    j["address"].get<std::string>(),
+    j["age"].get<int>()
+};
+```
+
+It works, but that's quite a lot of boilerplate... 
Fortunately, there's a better way: + +```cpp +// create a person +ns::person p {"Ned Flanders", "744 Evergreen Terrace", 60}; + +// conversion: person -> json +json j = p; + +std::cout << j << std::endl; +// {"address":"744 Evergreen Terrace","age":60,"name":"Ned Flanders"} + +// conversion: json -> person +auto p2 = j.get(); + +// that's it +assert(p == p2); +``` + +#### Basic usage + +To make this work with one of your types, you only need to provide two functions: + +```cpp +using nlohmann::json; + +namespace ns { + void to_json(json& j, const person& p) { + j = json{{"name", p.name}, {"address", p.address}, {"age", p.age}}; + } + + void from_json(const json& j, person& p) { + j.at("name").get_to(p.name); + j.at("address").get_to(p.address); + j.at("age").get_to(p.age); + } +} // namespace ns +``` + +That's all! When calling the `json` constructor with your type, your custom `to_json` method will be automatically called. +Likewise, when calling `get()` or `get_to(your_type&)`, the `from_json` method will be called. + +Some important things: + +* Those methods **MUST** be in your type's namespace (which can be the global namespace), or the library will not be able to locate them (in this example, they are in namespace `ns`, where `person` is defined). +* Those methods **MUST** be available (e.g., proper headers must be included) everywhere you use these conversions. Look at [issue 1108](https://github.com/nlohmann/json/issues/1108) for errors that may occur otherwise. +* When using `get()`, `your_type` **MUST** be [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). (There is a way to bypass this requirement described later.) +* In function `from_json`, use function [`at()`](https://nlohmann.github.io/json/api/basic_json/at/) to access the object values rather than `operator[]`. In case a key does not exist, `at` throws an exception that you can handle, whereas `operator[]` exhibits undefined behavior. 
+* You do not need to add serializers or deserializers for STL types like `std::vector`: the library already implements these. + +#### Simplify your life with macros + +If you just want to serialize/deserialize some structs, the `to_json`/`from_json` functions can be a lot of boilerplate. + +There are two macros to make your life easier as long as you (1) want to use a JSON object as serialization and (2) want to use the member variable names as object keys in that object: + +- `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(name, member1, member2, ...)` is to be defined inside of the namespace of the class/struct to create code for. +- `NLOHMANN_DEFINE_TYPE_INTRUSIVE(name, member1, member2, ...)` is to be defined inside of the class/struct to create code for. This macro can also access private members. + +In both macros, the first parameter is the name of the class/struct, and all remaining parameters name the members. + +##### Examples + +The `to_json`/`from_json` functions for the `person` struct above can be created with: + +```cpp +namespace ns { + NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(person, name, address, age) +} +``` + +Here is an example with private members, where `NLOHMANN_DEFINE_TYPE_INTRUSIVE` is needed: + +```cpp +namespace ns { + class address { + private: + std::string street; + int housenumber; + int postcode; + + public: + NLOHMANN_DEFINE_TYPE_INTRUSIVE(address, street, housenumber, postcode) + }; +} +``` + +#### How do I convert third-party types? + +This requires a bit more advanced technique. But first, let's see how this conversion mechanism works: + +The library uses **JSON Serializers** to convert types to json. +The default serializer for `nlohmann::json` is `nlohmann::adl_serializer` (ADL means [Argument-Dependent Lookup](https://en.cppreference.com/w/cpp/language/adl)). 
+ +It is implemented like this (simplified): + +```cpp +template +struct adl_serializer { + static void to_json(json& j, const T& value) { + // calls the "to_json" method in T's namespace + } + + static void from_json(const json& j, T& value) { + // same thing, but with the "from_json" method + } +}; +``` + +This serializer works fine when you have control over the type's namespace. However, what about `boost::optional` or `std::filesystem::path` (C++17)? Hijacking the `boost` namespace is pretty bad, and it's illegal to add something other than template specializations to `std`... + +To solve this, you need to add a specialization of `adl_serializer` to the `nlohmann` namespace, here's an example: + +```cpp +// partial specialization (full specialization works too) +namespace nlohmann { + template + struct adl_serializer> { + static void to_json(json& j, const boost::optional& opt) { + if (opt == boost::none) { + j = nullptr; + } else { + j = *opt; // this will call adl_serializer::to_json which will + // find the free function to_json in T's namespace! + } + } + + static void from_json(const json& j, boost::optional& opt) { + if (j.is_null()) { + opt = boost::none; + } else { + opt = j.get(); // same as above, but with + // adl_serializer::from_json + } + } + }; +} +``` + +#### How can I use `get()` for non-default constructible/non-copyable types? + +There is a way, if your type is [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible). 
You will need to specialize the `adl_serializer` as well, but with a special `from_json` overload: + +```cpp +struct move_only_type { + move_only_type() = delete; + move_only_type(int ii): i(ii) {} + move_only_type(const move_only_type&) = delete; + move_only_type(move_only_type&&) = default; + + int i; +}; + +namespace nlohmann { + template <> + struct adl_serializer { + // note: the return type is no longer 'void', and the method only takes + // one argument + static move_only_type from_json(const json& j) { + return {j.get()}; + } + + // Here's the catch! You must provide a to_json method! Otherwise you + // will not be able to convert move_only_type to json, since you fully + // specialized adl_serializer on that type + static void to_json(json& j, move_only_type t) { + j = t.i; + } + }; +} +``` + +#### Can I write my own serializer? (Advanced use) + +Yes. You might want to take a look at [`unit-udt.cpp`](https://github.com/nlohmann/json/blob/develop/test/src/unit-udt.cpp) in the test suite, to see a few examples. + +If you write your own serializer, you'll need to do a few things: + +- use a different `basic_json` alias than `nlohmann::json` (the last template parameter of `basic_json` is the `JSONSerializer`) +- use your `basic_json` alias (or a template parameter) in all your `to_json`/`from_json` methods +- use `nlohmann::to_json` and `nlohmann::from_json` when you need ADL + +Here is an example, without simplifications, that only accepts types with a size <= 32, and uses ADL. 
+ +```cpp +// You should use void as a second template argument +// if you don't need compile-time checks on T +template::type> +struct less_than_32_serializer { + template + static void to_json(BasicJsonType& j, T value) { + // we want to use ADL, and call the correct to_json overload + using nlohmann::to_json; // this method is called by adl_serializer, + // this is where the magic happens + to_json(j, value); + } + + template + static void from_json(const BasicJsonType& j, T& value) { + // same thing here + using nlohmann::from_json; + from_json(j, value); + } +}; +``` + +Be **very** careful when reimplementing your serializer, you can stack overflow if you don't pay attention: + +```cpp +template +struct bad_serializer +{ + template + static void to_json(BasicJsonType& j, const T& value) { + // this calls BasicJsonType::json_serializer::to_json(j, value); + // if BasicJsonType::json_serializer == bad_serializer ... oops! + j = value; + } + + template + static void to_json(const BasicJsonType& j, T& value) { + // this calls BasicJsonType::json_serializer::from_json(j, value); + // if BasicJsonType::json_serializer == bad_serializer ... oops! + value = j.template get(); // oops! + } +}; +``` + +### Specializing enum conversion + +By default, enum values are serialized to JSON as integers. In some cases this could result in undesired behavior. If an enum is modified or re-ordered after data has been serialized to JSON, the later de-serialized JSON data may be undefined or a different enum value than was originally intended. 
+ +It is possible to more precisely specify how a given enum is mapped to and from JSON as shown below: + +```cpp +// example enum type declaration +enum TaskState { + TS_STOPPED, + TS_RUNNING, + TS_COMPLETED, + TS_INVALID=-1, +}; + +// map TaskState values to JSON as strings +NLOHMANN_JSON_SERIALIZE_ENUM( TaskState, { + {TS_INVALID, nullptr}, + {TS_STOPPED, "stopped"}, + {TS_RUNNING, "running"}, + {TS_COMPLETED, "completed"}, +}) +``` + +The `NLOHMANN_JSON_SERIALIZE_ENUM()` macro declares a set of `to_json()` / `from_json()` functions for type `TaskState` while avoiding repetition and boilerplate serialization code. + +**Usage:** + +```cpp +// enum to JSON as string +json j = TS_STOPPED; +assert(j == "stopped"); + +// json string to enum +json j3 = "running"; +assert(j3.get() == TS_RUNNING); + +// undefined json value to enum (where the first map entry above is the default) +json jPi = 3.14; +assert(jPi.get() == TS_INVALID ); +``` + +Just as in [Arbitrary Type Conversions](#arbitrary-types-conversions) above, +- `NLOHMANN_JSON_SERIALIZE_ENUM()` MUST be declared in your enum type's namespace (which can be the global namespace), or the library will not be able to locate it and it will default to integer serialization. +- It MUST be available (e.g., proper headers must be included) everywhere you use the conversions. + +Other Important points: +- When using `get()`, undefined JSON values will default to the first pair specified in your map. Select this default pair carefully. +- If an enum or JSON value is specified more than once in your map, the first matching occurrence from the top of the map will be returned when converting to or from JSON. + +### Binary formats (BSON, CBOR, MessagePack, and UBJSON) + +Though JSON is a ubiquitous data format, it is not a very compact format suitable for data exchange, for instance over a network. 
Hence, the library supports [BSON](http://bsonspec.org) (Binary JSON), [CBOR](https://cbor.io) (Concise Binary Object Representation), [MessagePack](https://msgpack.org), and [UBJSON](http://ubjson.org) (Universal Binary JSON Specification) to efficiently encode JSON values to byte vectors and to decode such vectors.
+
+```cpp
+// create a JSON value
+json j = R"({"compact": true, "schema": 0})"_json;
+
+// serialize to BSON
+std::vector<std::uint8_t> v_bson = json::to_bson(j);
+
+// 0x1B, 0x00, 0x00, 0x00, 0x08, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x00, 0x01, 0x10, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+
+// roundtrip
+json j_from_bson = json::from_bson(v_bson);
+
+// serialize to CBOR
+std::vector<std::uint8_t> v_cbor = json::to_cbor(j);
+
+// 0xA2, 0x67, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0xF5, 0x66, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00
+
+// roundtrip
+json j_from_cbor = json::from_cbor(v_cbor);
+
+// serialize to MessagePack
+std::vector<std::uint8_t> v_msgpack = json::to_msgpack(j);
+
+// 0x82, 0xA7, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0xC3, 0xA6, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00
+
+// roundtrip
+json j_from_msgpack = json::from_msgpack(v_msgpack);
+
+// serialize to UBJSON
+std::vector<std::uint8_t> v_ubjson = json::to_ubjson(j);
+
+// 0x7B, 0x69, 0x07, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x54, 0x69, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x69, 0x00, 0x7D
+
+// roundtrip
+json j_from_ubjson = json::from_ubjson(v_ubjson);
+```
+
+The library also supports binary types from BSON, CBOR (byte strings), and MessagePack (bin, ext, fixext). They are stored by default as `std::vector<std::uint8_t>` to be processed outside of the library.
+
+```cpp
+// CBOR byte string with payload 0xCAFE
+std::vector<std::uint8_t> v = {0x42, 0xCA, 0xFE};
+
+// read value
+json j = json::from_cbor(v);
+
+// the JSON value has type binary
+j.is_binary(); // true
+
+// get reference to stored binary value
+auto& binary = j.get_binary();
+
+// the binary value has no subtype (CBOR has no binary subtypes)
+binary.has_subtype(); // false
+
+// access std::vector<std::uint8_t> member functions
+binary.size(); // 2
+binary[0]; // 0xCA
+binary[1]; // 0xFE
+
+// set subtype to 0x10
+binary.set_subtype(0x10);
+
+// serialize to MessagePack
+auto cbor = json::to_msgpack(j); // 0xD5 (fixext2), 0x10, 0xCA, 0xFE
+```
+
+
+## Supported compilers
+
+Though it's 2021 already, the support for C++11 is still a bit sparse. Currently, the following compilers are known to work:
+
+- GCC 4.8 - 11.0 (and possibly later)
+- Clang 3.4 - 11.0 (and possibly later)
+- Apple Clang 9.1 - 12.3 (and possibly later)
+- Intel C++ Compiler 17.0.2 (and possibly later)
+- Microsoft Visual C++ 2015 / Build Tools 14.0.25123.0 (and possibly later)
+- Microsoft Visual C++ 2017 / Build Tools 15.5.180.51428 (and possibly later)
+- Microsoft Visual C++ 2019 / Build Tools 16.3.1+1def00d3d (and possibly later)
+
+I would be happy to learn about other compilers/versions.
+
+Please note:
+
+- GCC 4.8 has a bug [57824](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57824): multiline raw strings cannot be the arguments to macros. Don't use multiline raw strings directly in macros with this compiler.
+- Android defaults to using very old compilers and C++ libraries. To fix this, add the following to your `Application.mk`. This will switch to the LLVM C++ library, the Clang compiler, and enable C++11 and other features disabled by default. 
+ + ``` + APP_STL := c++_shared + NDK_TOOLCHAIN_VERSION := clang3.6 + APP_CPPFLAGS += -frtti -fexceptions + ``` + + The code compiles successfully with [Android NDK](https://developer.android.com/ndk/index.html?hl=ml), Revision 9 - 11 (and possibly later) and [CrystaX's Android NDK](https://www.crystax.net/en/android/ndk) version 10. + +- For GCC running on MinGW or Android SDK, the error `'to_string' is not a member of 'std'` (or similarly, for `strtod` or `strtof`) may occur. Note this is not an issue with the code, but rather with the compiler itself. On Android, see above to build with a newer environment. For MinGW, please refer to [this site](https://tehsausage.com/mingw-to-string) and [this discussion](https://github.com/nlohmann/json/issues/136) for information on how to fix this bug. For Android NDK using `APP_STL := gnustl_static`, please refer to [this discussion](https://github.com/nlohmann/json/issues/219). + +- Unsupported versions of GCC and Clang are rejected by `#error` directives. This can be switched off by defining `JSON_SKIP_UNSUPPORTED_COMPILER_CHECK`. Note that you can expect no support in this case. 
+ +The following compilers are currently used in continuous integration at [Travis](https://travis-ci.org/nlohmann/json), [AppVeyor](https://ci.appveyor.com/project/nlohmann/json), [GitHub Actions](https://github.com/nlohmann/json/actions), and [CircleCI](https://circleci.com/gh/nlohmann/json): + +| Compiler | Operating System | CI Provider | +|-------------------------------------------------------------------|--------------------|----------------| +| Apple Clang 10.0.1 (clang-1001.0.46.4); Xcode 10.2.1 | macOS 10.14.4 | Travis | +| Apple Clang 11.0.0 (clang-1100.0.33.12); Xcode 11.2.1 | macOS 10.14.6 | Travis | +| Apple Clang 11.0.3 (clang-1103.0.32.59); Xcode 11.4.1 | macOS 10.15.4 | GitHub Actions | +| Apple Clang 12.0.0 (clang-1200.0.22.7); Xcode 11.4.1 | macOS 10.15.5 | Travis | +| Clang 3.5.0 (3.5.0-4ubuntu2\~trusty2) | Ubuntu 14.04.5 LTS | Travis | +| Clang 3.6.2 (3.6.2-svn240577-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | +| Clang 3.7.1 (3.7.1-svn253571-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | +| Clang 3.8.0 (3.8.0-2ubuntu3\~trusty5) | Ubuntu 14.04.5 LTS | Travis | +| Clang 3.9.1 (3.9.1-4ubuntu3\~14.04.3) | Ubuntu 14.04.5 LTS | Travis | +| Clang 4.0.1 (4.0.1-svn305264-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | +| Clang 5.0.2 (version 5.0.2-svn328729-1\~exp1\~20180509123505.100) | Ubuntu 14.04.5 LTS | Travis | +| Clang 6.0.1 (6.0.1-svn334776-1\~exp1\~20190309042707.121) | Ubuntu 14.04.5 LTS | Travis | +| Clang 7.1.0 (7.1.0-svn353565-1\~exp1\~20190419134007.64) | Ubuntu 14.04.5 LTS | Travis | +| Clang 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~18.04) | Ubuntu 18.04.4 LTS | Travis | +| Clang 9.0.0 (x86_64-pc-windows-msvc) | Windows-10.0.17763 | GitHub Actions | +| Clang 10.0.0 (x86_64-pc-windows-msvc) | Windows-10.0.17763 | GitHub Actions | +| GCC 4.8.5 (Ubuntu 4.8.5-4ubuntu8\~14.04.2) | Ubuntu 14.04.5 LTS | Travis | +| GCC 4.9.4 (Ubuntu 4.9.4-2ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | +| GCC 5.5.0 (Ubuntu 5.5.0-12ubuntu1\~14.04) | Ubuntu 14.04.5 LTS | Travis | +| GCC 
6.3.0 (Debian 6.3.0-18+deb9u1) | Debian 9 | Circle CI | +| GCC 6.5.0 (Ubuntu 6.5.0-2ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | +| GCC 7.3.0 (x86_64-posix-seh-rev0, Built by MinGW-W64 project) | Windows-6.3.9600 | AppVeyor | +| GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | +| GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~18.04) | Ubuntu 18.04.4 LTS | GitHub Actions | +| GCC 8.4.0 (Ubuntu 8.4.0-1ubuntu1\~14.04) | Ubuntu 14.04.5 LTS | Travis | +| GCC 9.3.0 (Ubuntu 9.3.0-11ubuntu0\~14.04) | Ubuntu 14.04.5 LTS | Travis | +| GCC 10.1.0 (Arch Linux latest) | Arch Linux | Circle CI | +| MSVC 19.0.24241.7 (Build Engine version 14.0.25420.1) | Windows-6.3.9600 | AppVeyor | +| MSVC 19.16.27035.0 (15.9.21+g9802d43bc3 for .NET Framework) | Windows-10.0.14393 | AppVeyor | +| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) | Windows-10.0.17763 | AppVeyor | +| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) | Windows-10.0.17763 | GitHub Actions | +| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) with ClangCL 10.0.0 | Windows-10.0.17763 | GitHub Actions | + +## License + + + +The class is licensed under the [MIT License](http://opensource.org/licenses/MIT): + +Copyright © 2013-2021 [Niels Lohmann](https://nlohmann.me) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +* * * + +The class contains the UTF-8 Decoder from Bjoern Hoehrmann which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright © 2008-2009 [Björn Hoehrmann](https://bjoern.hoehrmann.de/) + +The class contains a slightly modified version of the Grisu2 algorithm from Florian Loitsch which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright © 2009 [Florian Loitsch](https://florian.loitsch.com/) + +The class contains a copy of [Hedley](https://nemequ.github.io/hedley/) from Evan Nemerson which is licensed as [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/). + +## Contact + +If you have questions regarding the library, I would like to invite you to [open an issue at GitHub](https://github.com/nlohmann/json/issues/new/choose). Please describe your request, problem, or question as detailed as possible, and also mention the version of the library you are using as well as the version of your compiler and operating system. Opening an issue at GitHub allows other users and contributors to this library to collaborate. For instance, I have little experience with MSVC, and most issues in this regard have been solved by a growing community. If you have a look at the [closed issues](https://github.com/nlohmann/json/issues?q=is%3Aissue+is%3Aclosed), you will see that we react quite timely in most cases. + +Only if your request would contain confidential information, please [send me an email](mailto:mail@nlohmann.me). 
For encrypted messages, please use [this key](https://keybase.io/nlohmann/pgp_keys.asc).
+
+## Security
+
+[Commits by Niels Lohmann](https://github.com/nlohmann/json/commits) and [releases](https://github.com/nlohmann/json/releases) are signed with this [PGP Key](https://keybase.io/nlohmann/pgp_keys.asc?fingerprint=797167ae41c0a6d9232e48457f3cea63ae251b69).
+
+## Thanks
+
+I deeply appreciate the help of the following people.
+
+
+- [Teemperor](https://github.com/Teemperor) implemented CMake support and lcov integration, realized escape and Unicode handling in the string parser, and fixed the JSON serialization.
+- [elliotgoodrich](https://github.com/elliotgoodrich) fixed an issue with double deletion in the iterator classes.
+- [kirkshoop](https://github.com/kirkshoop) made the iterators of the class composable to other libraries.
+- [wancw](https://github.com/wancw) fixed a bug that hindered the class to compile with Clang.
+- Tomas Åblad found a bug in the iterator implementation.
+- [Joshua C. Randall](https://github.com/jrandall) fixed a bug in the floating-point serialization.
+- [Aaron Burghardt](https://github.com/aburgh) implemented code to parse streams incrementally. Furthermore, he greatly improved the parser class by allowing the definition of a filter function to discard undesired elements while parsing.
+- [Daniel Kopeček](https://github.com/dkopecek) fixed a bug in the compilation with GCC 5.0.
+- [Florian Weber](https://github.com/Florianjw) fixed a bug in and improved the performance of the comparison operators.
+- [Eric Cornelius](https://github.com/EricMCornelius) pointed out a bug in the handling with NaN and infinity values. He also improved the performance of the string escaping.
+- [易思龙](https://github.com/likebeta) implemented a conversion from anonymous enums.
+- [kepkin](https://github.com/kepkin) patiently pushed forward the support for Microsoft Visual Studio. 
+- [gregmarr](https://github.com/gregmarr) simplified the implementation of reverse iterators and helped with numerous hints and improvements. In particular, he pushed forward the implementation of user-defined types.
+- [Caio Luppi](https://github.com/caiovlp) fixed a bug in the Unicode handling.
+- [dariomt](https://github.com/dariomt) fixed some typos in the examples.
+- [Daniel Frey](https://github.com/d-frey) cleaned up some pointers and implemented exception-safe memory allocation.
+- [Colin Hirsch](https://github.com/ColinH) took care of a small namespace issue.
+- [Huu Nguyen](https://github.com/whoshuu) corrected a variable name in the documentation.
+- [Silverweed](https://github.com/silverweed) overloaded `parse()` to accept an rvalue reference.
+- [dariomt](https://github.com/dariomt) fixed a subtlety in MSVC type support and implemented the `get_ref()` function to get a reference to stored values.
+- [ZahlGraf](https://github.com/ZahlGraf) added a workaround that allows compilation using Android NDK.
+- [whackashoe](https://github.com/whackashoe) replaced a function that was marked as unsafe by Visual Studio.
+- [406345](https://github.com/406345) fixed two small warnings.
+- [Glen Fernandes](https://github.com/glenfe) noted a potential portability problem in the `has_mapped_type` function.
+- [Corbin Hughes](https://github.com/nibroc) fixed some typos in the contribution guidelines.
+- [twelsby](https://github.com/twelsby) fixed the array subscript operator, an issue that failed the MSVC build, and floating-point parsing/dumping. He further added support for unsigned integer numbers and implemented better roundtrip support for parsed numbers.
+- [Volker Diels-Grabsch](https://github.com/vog) fixed a link in the README file.
+- [msm-](https://github.com/msm-) added support for American Fuzzy Lop.
+- [Annihil](https://github.com/Annihil) fixed an example in the README file.
+- [Themercee](https://github.com/Themercee) noted a wrong URL in the README file. 
+- [Lv Zheng](https://github.com/lv-zheng) fixed a namespace issue with `int64_t` and `uint64_t`. +- [abc100m](https://github.com/abc100m) analyzed the issues with GCC 4.8 and proposed a [partial solution](https://github.com/nlohmann/json/pull/212). +- [zewt](https://github.com/zewt) added useful notes to the README file about Android. +- [Róbert Márki](https://github.com/robertmrk) added a fix to use move iterators and improved the integration via CMake. +- [Chris Kitching](https://github.com/ChrisKitching) cleaned up the CMake files. +- [Tom Needham](https://github.com/06needhamt) fixed a subtle bug with MSVC 2015 which was also proposed by [Michael K.](https://github.com/Epidal). +- [Mário Feroldi](https://github.com/thelostt) fixed a small typo. +- [duncanwerner](https://github.com/duncanwerner) found a really embarrassing performance regression in the 2.0.0 release. +- [Damien](https://github.com/dtoma) fixed one of the last conversion warnings. +- [Thomas Braun](https://github.com/t-b) fixed a warning in a test case and adjusted MSVC calls in the CI. +- [Théo DELRIEU](https://github.com/theodelrieu) patiently and constructively oversaw the long way toward [iterator-range parsing](https://github.com/nlohmann/json/issues/290). He also implemented the magic behind the serialization/deserialization of user-defined types and split the single header file into smaller chunks. +- [Stefan](https://github.com/5tefan) fixed a minor issue in the documentation. +- [Vasil Dimov](https://github.com/vasild) fixed the documentation regarding conversions from `std::multiset`. +- [ChristophJud](https://github.com/ChristophJud) overworked the CMake files to ease project inclusion. +- [Vladimir Petrigo](https://github.com/vpetrigo) made a SFINAE hack more readable and added Visual Studio 17 to the build matrix. +- [Denis Andrejew](https://github.com/seeekr) fixed a grammar issue in the README file. 
+- [Pierre-Antoine Lacaze](https://github.com/palacaze) found a subtle bug in the `dump()` function.
+- [TurpentineDistillery](https://github.com/TurpentineDistillery) pointed to [`std::locale::classic()`](https://en.cppreference.com/w/cpp/locale/locale/classic) to avoid too much locale joggling, found some nice performance improvements in the parser, improved the benchmarking code, and realized locale-independent number parsing and printing.
+- [cgzones](https://github.com/cgzones) had an idea how to fix the Coverity scan.
+- [Jared Grubb](https://github.com/jaredgrubb) silenced a nasty documentation warning.
+- [Yixin Zhang](https://github.com/qwename) fixed an integer overflow check.
+- [Bosswestfalen](https://github.com/Bosswestfalen) merged two iterator classes into a smaller one.
+- [Daniel599](https://github.com/Daniel599) helped to get Travis execute the tests with Clang's sanitizers.
+- [Jonathan Lee](https://github.com/vjon) fixed an example in the README file.
+- [gnzlbg](https://github.com/gnzlbg) supported the implementation of user-defined types.
+- [Alexej Harm](https://github.com/qis) helped to get the user-defined types working with Visual Studio.
+- [Jared Grubb](https://github.com/jaredgrubb) supported the implementation of user-defined types.
+- [EnricoBilla](https://github.com/EnricoBilla) noted a typo in an example.
+- [Martin Hořeňovský](https://github.com/horenmar) found a way for a 2x speedup for the compilation time of the test suite.
+- [ukhegg](https://github.com/ukhegg) proposed an improvement for the examples section.
+- [rswanson-ihi](https://github.com/rswanson-ihi) noted a typo in the README.
+- [Mihai Stan](https://github.com/stanmihai4) fixed a bug in the comparison with `nullptr`s.
+- [Tushar Maheshwari](https://github.com/tusharpm) added [cotire](https://github.com/sakra/cotire) support to speed up the compilation. 
+- [TedLyngmo](https://github.com/TedLyngmo) noted a typo in the README, removed unnecessary bit arithmetic, and fixed some `-Weffc++` warnings.
+- [Krzysztof Woś](https://github.com/krzysztofwos) made exceptions more visible.
+- [ftillier](https://github.com/ftillier) fixed a compiler warning.
+- [tinloaf](https://github.com/tinloaf) made sure all pushed warnings are properly popped.
+- [Fytch](https://github.com/Fytch) found a bug in the documentation.
+- [Jay Sistar](https://github.com/Type1J) implemented a Meson build description.
+- [Henry Lee](https://github.com/HenryRLee) fixed a warning in ICC and improved the iterator implementation.
+- [Vincent Thiery](https://github.com/vthiery) maintains a package for the Conan package manager.
+- [Steffen](https://github.com/koemeet) fixed a potential issue with MSVC and `std::min`.
+- [Mike Tzou](https://github.com/Chocobo1) fixed some typos.
+- [amrcode](https://github.com/amrcode) noted a misleading documentation about comparison of floats.
+- [Oleg Endo](https://github.com/olegendo) reduced the memory consumption by replacing `<iostream>` with `<cstdio>`.
+- [dan-42](https://github.com/dan-42) cleaned up the CMake files to simplify including/reusing of the library.
+- [Nikita Ofitserov](https://github.com/himikof) allowed for moving values from initializer lists.
+- [Greg Hurrell](https://github.com/wincent) fixed a typo.
+- [Dmitry Kukovinets](https://github.com/DmitryKuk) fixed a typo.
+- [kbthomp1](https://github.com/kbthomp1) fixed an issue related to the Intel OSX compiler.
+- [Markus Werle](https://github.com/daixtrose) fixed a typo.
+- [WebProdPP](https://github.com/WebProdPP) fixed a subtle error in a precondition check.
+- [Alex](https://github.com/leha-bot) noted an error in a code sample.
+- [Tom de Geus](https://github.com/tdegeus) reported some warnings with ICC and helped fixing them.
+- [Perry Kundert](https://github.com/pjkundert) simplified reading from input streams. 
+- [Sonu Lohani](https://github.com/sonulohani) fixed a small compilation error. +- [Jamie Seward](https://github.com/jseward) fixed all MSVC warnings. +- [Nate Vargas](https://github.com/eld00d) added a Doxygen tag file. +- [pvleuven](https://github.com/pvleuven) helped fixing a warning in ICC. +- [Pavel](https://github.com/crea7or) helped fixing some warnings in MSVC. +- [Jamie Seward](https://github.com/jseward) avoided unnecessary string copies in `find()` and `count()`. +- [Mitja](https://github.com/Itja) fixed some typos. +- [Jorrit Wronski](https://github.com/jowr) updated the Hunter package links. +- [Matthias Möller](https://github.com/TinyTinni) added a `.natvis` for the MSVC debug view. +- [bogemic](https://github.com/bogemic) fixed some C++17 deprecation warnings. +- [Eren Okka](https://github.com/erengy) fixed some MSVC warnings. +- [abolz](https://github.com/abolz) integrated the Grisu2 algorithm for proper floating-point formatting, allowing more roundtrip checks to succeed. +- [Vadim Evard](https://github.com/Pipeliner) fixed a Markdown issue in the README. +- [zerodefect](https://github.com/zerodefect) fixed a compiler warning. +- [Kert](https://github.com/kaidokert) allowed to template the string type in the serialization and added the possibility to override the exceptional behavior. +- [mark-99](https://github.com/mark-99) helped fixing an ICC error. +- [Patrik Huber](https://github.com/patrikhuber) fixed links in the README file. +- [johnfb](https://github.com/johnfb) found a bug in the implementation of CBOR's indefinite length strings. +- [Paul Fultz II](https://github.com/pfultz2) added a note on the cget package manager. +- [Wilson Lin](https://github.com/wla80) made the integration section of the README more concise. +- [RalfBielig](https://github.com/ralfbielig) detected and fixed a memory leak in the parser callback. +- [agrianius](https://github.com/agrianius) allowed to dump JSON to an alternative string type. 
+- [Kevin Tonon](https://github.com/ktonon) overworked the C++11 compiler checks in CMake. +- [Axel Huebl](https://github.com/ax3l) simplified a CMake check and added support for the [Spack package manager](https://spack.io). +- [Carlos O'Ryan](https://github.com/coryan) fixed a typo. +- [James Upjohn](https://github.com/jammehcow) fixed a version number in the compilers section. +- [Chuck Atkins](https://github.com/chuckatkins) adjusted the CMake files to the CMake packaging guidelines and provided documentation for the CMake integration. +- [Jan Schöppach](https://github.com/dns13) fixed a typo. +- [martin-mfg](https://github.com/martin-mfg) fixed a typo. +- [Matthias Möller](https://github.com/TinyTinni) removed the dependency from `std::stringstream`. +- [agrianius](https://github.com/agrianius) added code to use alternative string implementations. +- [Daniel599](https://github.com/Daniel599) allowed to use more algorithms with the `items()` function. +- [Julius Rakow](https://github.com/jrakow) fixed the Meson include directory and fixed the links to [cppreference.com](cppreference.com). +- [Sonu Lohani](https://github.com/sonulohani) fixed the compilation with MSVC 2015 in debug mode. +- [grembo](https://github.com/grembo) fixed the test suite and re-enabled several test cases. +- [Hyeon Kim](https://github.com/simnalamburt) introduced the macro `JSON_INTERNAL_CATCH` to control the exception handling inside the library. +- [thyu](https://github.com/thyu) fixed a compiler warning. +- [David Guthrie](https://github.com/LEgregius) fixed a subtle compilation error with Clang 3.4.2. +- [Dennis Fischer](https://github.com/dennisfischer) allowed to call `find_package` without installing the library. +- [Hyeon Kim](https://github.com/simnalamburt) fixed an issue with a double macro definition. +- [Ben Berman](https://github.com/rivertam) made some error messages more understandable. 
+- [zakalibit](https://github.com/zakalibit) fixed a compilation problem with the Intel C++ compiler. +- [mandreyel](https://github.com/mandreyel) fixed a compilation problem. +- [Kostiantyn Ponomarenko](https://github.com/koponomarenko) added version and license information to the Meson build file. +- [Henry Schreiner](https://github.com/henryiii) added support for GCC 4.8. +- [knilch](https://github.com/knilch0r) made sure the test suite does not stall when run in the wrong directory. +- [Antonio Borondo](https://github.com/antonioborondo) fixed an MSVC 2017 warning. +- [Dan Gendreau](https://github.com/dgendreau) implemented the `NLOHMANN_JSON_SERIALIZE_ENUM` macro to quickly define a enum/JSON mapping. +- [efp](https://github.com/efp) added line and column information to parse errors. +- [julian-becker](https://github.com/julian-becker) added BSON support. +- [Pratik Chowdhury](https://github.com/pratikpc) added support for structured bindings. +- [David Avedissian](https://github.com/davedissian) added support for Clang 5.0.1 (PS4 version). +- [Jonathan Dumaresq](https://github.com/dumarjo) implemented an input adapter to read from `FILE*`. +- [kjpus](https://github.com/kjpus) fixed a link in the documentation. +- [Manvendra Singh](https://github.com/manu-chroma) fixed a typo in the documentation. +- [ziggurat29](https://github.com/ziggurat29) fixed an MSVC warning. +- [Sylvain Corlay](https://github.com/SylvainCorlay) added code to avoid an issue with MSVC. +- [mefyl](https://github.com/mefyl) fixed a bug when JSON was parsed from an input stream. +- [Millian Poquet](https://github.com/mpoquet) allowed to install the library via Meson. +- [Michael Behrns-Miller](https://github.com/moodboom) found an issue with a missing namespace. +- [Nasztanovics Ferenc](https://github.com/naszta) fixed a compilation issue with libc 2.12. +- [Andreas Schwab](https://github.com/andreas-schwab) fixed the endian conversion. 
+- [Mark-Dunning](https://github.com/Mark-Dunning) fixed a warning in MSVC. +- [Gareth Sylvester-Bradley](https://github.com/garethsb-sony) added `operator/` for JSON Pointers. +- [John-Mark](https://github.com/johnmarkwayve) noted a missing header. +- [Vitaly Zaitsev](https://github.com/xvitaly) fixed compilation with GCC 9.0. +- [Laurent Stacul](https://github.com/stac47) fixed compilation with GCC 9.0. +- [Ivor Wanders](https://github.com/iwanders) helped reducing the CMake requirement to version 3.1. +- [njlr](https://github.com/njlr) updated the Buckaroo instructions. +- [Lion](https://github.com/lieff) fixed a compilation issue with GCC 7 on CentOS. +- [Isaac Nickaein](https://github.com/nickaein) improved the integer serialization performance and implemented the `contains()` function. +- [past-due](https://github.com/past-due) suppressed an unfixable warning. +- [Elvis Oric](https://github.com/elvisoric) improved Meson support. +- [Matěj Plch](https://github.com/Afforix) fixed an example in the README. +- [Mark Beckwith](https://github.com/wythe) fixed a typo. +- [scinart](https://github.com/scinart) fixed bug in the serializer. +- [Patrick Boettcher](https://github.com/pboettch) implemented `push_back()` and `pop_back()` for JSON Pointers. +- [Bruno Oliveira](https://github.com/nicoddemus) added support for Conda. +- [Michele Caini](https://github.com/skypjack) fixed links in the README. +- [Hani](https://github.com/hnkb) documented how to install the library with NuGet. +- [Mark Beckwith](https://github.com/wythe) fixed a typo. +- [yann-morin-1998](https://github.com/yann-morin-1998) helped reducing the CMake requirement to version 3.1. +- [Konstantin Podsvirov](https://github.com/podsvirov) maintains a package for the MSYS2 software distro. +- [remyabel](https://github.com/remyabel) added GNUInstallDirs to the CMake files. +- [Taylor Howard](https://github.com/taylorhoward92) fixed a unit test. 
+- [Gabe Ron](https://github.com/Macr0Nerd) implemented the `to_string` method.
+- [Watal M. Iwasaki](https://github.com/heavywatal) fixed a Clang warning.
+- [Viktor Kirilov](https://github.com/onqtam) switched the unit tests from [Catch](https://github.com/philsquared/Catch) to [doctest](https://github.com/onqtam/doctest).
+- [Juncheng E](https://github.com/ejcjason) fixed a typo.
+- [tete17](https://github.com/tete17) fixed a bug in the `contains` function.
+- [Xav83](https://github.com/Xav83) fixed some cppcheck warnings.
+- [0xflotus](https://github.com/0xflotus) fixed some typos.
+- [Christian Deneke](https://github.com/chris0x44) added a const version of `json_pointer::back`.
+- [Julien Hamaide](https://github.com/crazyjul) made the `items()` function work with custom string types.
+- [Evan Nemerson](https://github.com/nemequ) fixed a bug in Hedley and updated this library accordingly.
+- [Florian Pigorsch](https://github.com/flopp) fixed a lot of typos.
+- [Camille Bégué](https://github.com/cbegue) fixed an issue in the conversion from `std::pair` and `std::tuple` to `json`.
+- [Anthony VH](https://github.com/AnthonyVH) fixed a compile error in an enum deserialization.
+- [Yuriy Vountesmery](https://github.com/ua-code-dragon) noted a subtle bug in a preprocessor check.
+- [Chen](https://github.com/dota17) fixed numerous issues in the library.
+- [Antony Kellermann](https://github.com/aokellermann) added a CI step for GCC 10.1.
+- [Alex](https://github.com/gistrec) fixed an MSVC warning.
+- [Rainer](https://github.com/rvjr) proposed an improvement in the floating-point serialization in CBOR.
+- [Francois Chabot](https://github.com/FrancoisChabot) made performance improvements in the input adapters.
+- [Arthur Sonzogni](https://github.com/ArthurSonzogni) documented how the library can be included via `FetchContent`.
+- [Rimas Misevičius](https://github.com/rmisev) fixed an error message. 
+- [Alexander Myasnikov](https://github.com/alexandermyasnikov) fixed some examples and a link in the README. +- [Hubert Chathi](https://github.com/uhoreg) made CMake's version config file architecture-independent. +- [OmnipotentEntity](https://github.com/OmnipotentEntity) implemented the binary values for CBOR, MessagePack, BSON, and UBJSON. +- [ArtemSarmini](https://github.com/ArtemSarmini) fixed a compilation issue with GCC 10 and fixed a leak. +- [Evgenii Sopov](https://github.com/sea-kg) integrated the library to the wsjcpp package manager. +- [Sergey Linev](https://github.com/linev) fixed a compiler warning. +- [Miguel Magalhães](https://github.com/magamig) fixed the year in the copyright. +- [Gareth Sylvester-Bradley](https://github.com/garethsb-sony) fixed a compilation issue with MSVC. +- [Alexander “weej” Jones](https://github.com/alex-weej) fixed an example in the README. +- [Antoine Cœur](https://github.com/Coeur) fixed some typos in the documentation. +- [jothepro](https://github.com/jothepro) updated links to the Hunter package. +- [Dave Lee](https://github.com/kastiglione) fixed link in the README. +- [Joël Lamotte](https://github.com/Klaim) added instruction for using Build2's package manager. +- [Paul Jurczak](https://github.com/pauljurczak) fixed an example in the README. +- [Sonu Lohani](https://github.com/sonulohani) fixed a warning. +- [Carlos Gomes Martinho](https://github.com/gocarlos) updated the Conan package source. +- [Konstantin Podsvirov](https://github.com/podsvirov) fixed the MSYS2 package documentation. +- [Tridacnid](https://github.com/Tridacnid) improved the CMake tests. +- [Michael](https://github.com/MBalszun) fixed MSVC warnings. +- [Quentin Barbarat](https://github.com/quentin-dev) fixed an example in the documentation. +- [XyFreak](https://github.com/XyFreak) fixed a compiler warning. +- [TotalCaesar659](https://github.com/TotalCaesar659) fixed links in the README. 
+- [Tanuj Garg](https://github.com/tanuj208) improved the fuzzer coverage for UBSAN input. +- [AODQ](https://github.com/AODQ) fixed a compiler warning. +- [jwittbrodt](https://github.com/jwittbrodt) made `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE` inline. +- [pfeatherstone](https://github.com/pfeatherstone) improved the upper bound of arguments of the `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE`/`NLOHMANN_DEFINE_TYPE_INTRUSIVE` macros. +- [Jan Procházka](https://github.com/jprochazk) fixed a bug in the CBOR parser for binary and string values. +- [T0b1-iOS](https://github.com/T0b1-iOS) fixed a bug in the new hash implementation. +- [Matthew Bauer](https://github.com/matthewbauer) adjusted the CBOR writer to create tags for binary subtypes. +- [gatopeich](https://github.com/gatopeich) implemented an ordered map container for `nlohmann::ordered_json`. +- [Érico Nogueira Rolim](https://github.com/ericonr) added support for pkg-config. +- [KonanM](https://github.com/KonanM) proposed an implementation for the `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE`/`NLOHMANN_DEFINE_TYPE_INTRUSIVE` macros. +- [Guillaume Racicot](https://github.com/gracicot) implemented `string_view` support and allowed C++20 support. +- [Alex Reinking](https://github.com/alexreinking) improved CMake support for `FetchContent`. +- [Hannes Domani](https://github.com/ssbssa) provided a GDB pretty printer. + +Thanks a lot for helping out! Please [let me know](mailto:mail@nlohmann.me) if I forgot someone. + + +## Used third-party tools + +The library itself consists of a single header file licensed under the MIT license. However, it is built, tested, documented, and whatnot using a lot of third-party tools and services. Thanks a lot! 
+ +- [**amalgamate.py - Amalgamate C source and header files**](https://github.com/edlund/amalgamate) to create a single header file +- [**American fuzzy lop**](https://lcamtuf.coredump.cx/afl/) for fuzz testing +- [**AppVeyor**](https://www.appveyor.com) for [continuous integration](https://ci.appveyor.com/project/nlohmann/json) on Windows +- [**Artistic Style**](http://astyle.sourceforge.net) for automatic source code indentation +- [**CircleCI**](https://circleci.com) for [continuous integration](https://circleci.com/gh/nlohmann/json). +- [**Clang**](https://clang.llvm.org) for compilation with code sanitizers +- [**CMake**](https://cmake.org) for build automation +- [**Codacity**](https://www.codacy.com) for further [code analysis](https://www.codacy.com/app/nlohmann/json) +- [**Coveralls**](https://coveralls.io) to measure [code coverage](https://coveralls.io/github/nlohmann/json) +- [**Coverity Scan**](https://scan.coverity.com) for [static analysis](https://scan.coverity.com/projects/nlohmann-json) +- [**cppcheck**](http://cppcheck.sourceforge.net) for static analysis +- [**doctest**](https://github.com/onqtam/doctest) for the unit tests +- [**Doxygen**](https://www.doxygen.nl/index.html) to generate [documentation](https://nlohmann.github.io/json/doxygen/index.html) +- [**git-update-ghpages**](https://github.com/rstacruz/git-update-ghpages) to upload the documentation to gh-pages +- [**GitHub Changelog Generator**](https://github.com/skywinder/github-changelog-generator) to generate the [ChangeLog](https://github.com/nlohmann/json/blob/develop/ChangeLog.md) +- [**Google Benchmark**](https://github.com/google/benchmark) to implement the benchmarks +- [**Hedley**](https://nemequ.github.io/hedley/) to avoid re-inventing several compiler-agnostic feature macros +- [**lcov**](http://ltp.sourceforge.net/coverage/lcov.php) to process coverage information and create a HTML view +- [**libFuzzer**](https://llvm.org/docs/LibFuzzer.html) to implement fuzz testing for 
OSS-Fuzz +- [**OSS-Fuzz**](https://github.com/google/oss-fuzz) for continuous fuzz testing of the library ([project repository](https://github.com/google/oss-fuzz/tree/master/projects/json)) +- [**Probot**](https://probot.github.io) for automating maintainer tasks such as closing stale issues, requesting missing information, or detecting toxic comments. +- [**send_to_wandbox**](https://github.com/nlohmann/json/blob/develop/doc/scripts/send_to_wandbox.py) to send code examples to [Wandbox](http://melpon.org/wandbox) +- [**Travis**](https://travis-ci.org) for [continuous integration](https://travis-ci.org/nlohmann/json) on Linux and macOS +- [**Valgrind**](https://valgrind.org) to check for correct memory management +- [**Wandbox**](https://wandbox.org) for [online examples](https://wandbox.org/permlink/3lCHrFUZANONKv7a) + + +## Projects using JSON for Modern C++ + +The library is currently used in Apple macOS Sierra and iOS 10. I am not sure what they are using the library for, but I am happy that it runs on so many devices. + + +## Notes + +### Character encoding + +The library supports **Unicode input** as follows: + +- Only **UTF-8** encoded input is supported which is the default encoding for JSON according to [RFC 8259](https://tools.ietf.org/html/rfc8259.html#section-8.1). +- `std::u16string` and `std::u32string` can be parsed, assuming UTF-16 and UTF-32 encoding, respectively. These encodings are not supported when reading from files or other input containers. +- Other encodings such as Latin-1 or ISO 8859-1 are **not** supported and will yield parse or serialization errors. +- [Unicode noncharacters](https://www.unicode.org/faq/private_use.html#nonchar1) will not be replaced by the library. +- Invalid surrogates (e.g., incomplete pairs such as `\uDEAD`) will yield parse errors. +- The strings stored in the library are UTF-8 encoded. 
When using the default string type (`std::string`), note that its length/size functions return the number of stored bytes rather than the number of characters or glyphs. +- When you store strings with different encodings in the library, calling [`dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) may throw an exception unless `json::error_handler_t::replace` or `json::error_handler_t::ignore` are used as error handlers. + +### Comments in JSON + +This library does not support comments by default. It does so for three reasons: + +1. Comments are not part of the [JSON specification](https://tools.ietf.org/html/rfc8259). You may argue that `//` or `/* */` are allowed in JavaScript, but JSON is not JavaScript. +2. This was not an oversight: Douglas Crockford [wrote on this](https://plus.google.com/118095276221607585885/posts/RK8qyGVaGSr) in May 2012: + + > I removed comments from JSON because I saw people were using them to hold parsing directives, a practice which would have destroyed interoperability. I know that the lack of comments makes some people sad, but it shouldn't. + + > Suppose you are using JSON to keep configuration files, which you would like to annotate. Go ahead and insert all the comments you like. Then pipe it through JSMin before handing it to your JSON parser. + +3. It is dangerous for interoperability if some libraries would add comment support while others don't. Please check [The Harmful Consequences of the Robustness Principle](https://tools.ietf.org/html/draft-iab-protocol-maintenance-01) on this. + +However, you can pass set parameter `ignore_comments` to true in the `parse` function to ignore `//` or `/* */` comments. Comments will then be treated as whitespace. + +### Order of object keys + +By default, the library does not preserve the **insertion order of object elements**. 
This is standards-compliant, as the [JSON standard](https://tools.ietf.org/html/rfc8259.html) defines objects as "an unordered collection of zero or more name/value pairs". + +If you do want to preserve the insertion order, you can try the type [`nlohmann::ordered_json`](https://github.com/nlohmann/json/issues/2179). Alternatively, you can use a more sophisticated ordered map like [`tsl::ordered_map`](https://github.com/Tessil/ordered-map) ([integration](https://github.com/nlohmann/json/issues/546#issuecomment-304447518)) or [`nlohmann::fifo_map`](https://github.com/nlohmann/fifo_map) ([integration](https://github.com/nlohmann/json/issues/485#issuecomment-333652309)). + +### Memory Release + +We checked with Valgrind and the Address Sanitizer (ASAN) that there are no memory leaks. + +If you find that a parsing program with this library does not release memory, please consider the following case and it maybe unrelated to this library. + +**Your program is compiled with glibc.** There is a tunable threshold that glibc uses to decide whether to actually return memory to the system or whether to cache it for later reuse. If in your program you make lots of small allocations and those small allocations are not a contiguous block and are presumably below the threshold, then they will not get returned to the OS. +Here is a related issue [#1924](https://github.com/nlohmann/json/issues/1924). + +### Further notes + +- The code contains numerous debug **assertions** which can be switched off by defining the preprocessor macro `NDEBUG`, see the [documentation of `assert`](https://en.cppreference.com/w/cpp/error/assert). In particular, note [`operator[]`](https://nlohmann.github.io/json/api/basic_json/operator%5B%5D/) implements **unchecked access** for const objects: If the given key is not present, the behavior is undefined (think of a dereferenced null pointer) and yields an [assertion failure](https://github.com/nlohmann/json/issues/289) if assertions are switched on. 
If you are not sure whether an element in an object exists, use checked access with the [`at()` function](https://nlohmann.github.io/json/api/basic_json/at/). Furthermore, you can define `JSON_ASSERT(x)` to replace calls to `assert(x)`. +- As the exact type of a number is not defined in the [JSON specification](https://tools.ietf.org/html/rfc8259.html), this library tries to choose the best fitting C++ number type automatically. As a result, the type `double` may be used to store numbers which may yield [**floating-point exceptions**](https://github.com/nlohmann/json/issues/181) in certain rare situations if floating-point exceptions have been unmasked in the calling code. These exceptions are not caused by the library and need to be fixed in the calling code, such as by re-masking the exceptions prior to calling library functions. +- The code can be compiled without C++ **runtime type identification** features; that is, you can use the `-fno-rtti` compiler flag. +- **Exceptions** are used widely within the library. They can, however, be switched off with either using the compiler flag `-fno-exceptions` or by defining the symbol `JSON_NOEXCEPTION`. In this case, exceptions are replaced by `abort()` calls. You can further control this behavior by defining `JSON_THROW_USER` (overriding `throw`), `JSON_TRY_USER` (overriding `try`), and `JSON_CATCH_USER` (overriding `catch`). Note that `JSON_THROW_USER` should leave the current scope (e.g., by throwing or aborting), as continuing after it may yield undefined behavior. + +## Execute unit tests + +To compile and run the tests, you need to execute + +```sh +$ mkdir build +$ cd build +$ cmake .. -DJSON_BuildTests=On +$ cmake --build . +$ ctest --output-on-failure +``` + +Note that during the `ctest` stage, several JSON test files are downloaded from an [external repository](https://github.com/nlohmann/json_test_data). 
If policies forbid downloading artifacts during testing, you can download the files yourself and pass the directory with the test files via `-DJSON_TestDataDirectory=path` to CMake. Then, no Internet connectivity is required. See [issue #2189](https://github.com/nlohmann/json/issues/2189) for more information. + +In case you have downloaded the library rather than checked out the code via Git, test `cmake_fetch_content_configure`. Please execute `ctest -LE git_required` to skip these tests. See [issue #2189](https://github.com/nlohmann/json/issues/2189) for more information. + +Some tests change the installed files and hence make the whole process not reproducible. Please execute `ctest -LE not_reproducible` to skip these tests. See [issue #2324](https://github.com/nlohmann/json/issues/2324) for more information. + +Note you need to call `cmake -LE "not_reproducible|git_required"` to exclude both labels. See [issue #2596](https://github.com/nlohmann/json/issues/2596) for more information. + +As Intel compilers use unsafe floating point optimization by default, the unit tests may fail. Use flag [`/fp:precise`](https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/compiler-options/compiler-option-details/floating-point-options/fp-model-fp.html) then. diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp b/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp new file mode 100644 index 000000000..a83971da2 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp @@ -0,0 +1,25855 @@ +/* + __ _____ _____ _____ + __| | __| | | | JSON for Modern C++ +| | |__ | | | | | | version 3.9.1 +|_____|_____|_____|_|___| https://github.com/nlohmann/json + +Licensed under the MIT License . +SPDX-License-Identifier: MIT +Copyright (c) 2013-2019 Niels Lohmann . 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+*/ + +#ifndef INCLUDE_NLOHMANN_JSON_HPP_ +#define INCLUDE_NLOHMANN_JSON_HPP_ + +#define NLOHMANN_JSON_VERSION_MAJOR 3 +#define NLOHMANN_JSON_VERSION_MINOR 9 +#define NLOHMANN_JSON_VERSION_PATCH 1 + +#include // all_of, find, for_each +#include // nullptr_t, ptrdiff_t, size_t +#include // hash, less +#include // initializer_list +#include // istream, ostream +#include // random_access_iterator_tag +#include // unique_ptr +#include // accumulate +#include // string, stoi, to_string +#include // declval, forward, move, pair, swap +#include // vector + +// #include + + +#include + +// #include + + +#include // transform +#include // array +#include // forward_list +#include // inserter, front_inserter, end +#include // map +#include // string +#include // tuple, make_tuple +#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible +#include // unordered_map +#include // pair, declval +#include // valarray + +// #include + + +#include // exception +#include // runtime_error +#include // to_string + +// #include + + +#include // array +#include // size_t +#include // uint8_t +#include // string + +namespace nlohmann +{ +namespace detail +{ +/////////////////////////// +// JSON type enumeration // +/////////////////////////// + +/*! +@brief the JSON type enumeration + +This enumeration collects the different JSON types. It is internally used to +distinguish the stored values, and the functions @ref basic_json::is_null(), +@ref basic_json::is_object(), @ref basic_json::is_array(), +@ref basic_json::is_string(), @ref basic_json::is_boolean(), +@ref basic_json::is_number() (with @ref basic_json::is_number_integer(), +@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()), +@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and +@ref basic_json::is_structured() rely on it. 
+ +@note There are three enumeration entries (number_integer, number_unsigned, and +number_float), because the library distinguishes these three types for numbers: +@ref basic_json::number_unsigned_t is used for unsigned integers, +@ref basic_json::number_integer_t is used for signed integers, and +@ref basic_json::number_float_t is used for floating-point numbers or to +approximate integers which do not fit in the limits of their respective type. + +@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON +value with the default value for a given type + +@since version 1.0.0 +*/ +enum class value_t : std::uint8_t +{ + null, ///< null value + object, ///< object (unordered set of name/value pairs) + array, ///< array (ordered collection of values) + string, ///< string value + boolean, ///< boolean value + number_integer, ///< number value (signed integer) + number_unsigned, ///< number value (unsigned integer) + number_float, ///< number value (floating-point) + binary, ///< binary array (ordered collection of bytes) + discarded ///< discarded by the parser callback function +}; + +/*! +@brief comparison operator for JSON types + +Returns an ordering that is similar to Python: +- order: null < boolean < number < object < array < string < binary +- furthermore, each type is not smaller than itself +- discarded values are not comparable +- binary is represented as a b"" string in python and directly comparable to a + string; however, making a binary array directly comparable with a string would + be surprising behavior in a JSON file. 
+ +@since version 1.0.0 +*/ +inline bool operator<(const value_t lhs, const value_t rhs) noexcept +{ + static constexpr std::array order = {{ + 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */, + 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */, + 6 /* binary */ + } + }; + + const auto l_index = static_cast(lhs); + const auto r_index = static_cast(rhs); + return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index]; +} +} // namespace detail +} // namespace nlohmann + +// #include + + +#include +// #include + + +#include // pair +// #include +/* Hedley - https://nemequ.github.io/hedley + * Created by Evan Nemerson + * + * To the extent possible under law, the author(s) have dedicated all + * copyright and related and neighboring rights to this software to + * the public domain worldwide. This software is distributed without + * any warranty. + * + * For details, see . + * SPDX-License-Identifier: CC0-1.0 + */ + +#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15) +#if defined(JSON_HEDLEY_VERSION) + #undef JSON_HEDLEY_VERSION +#endif +#define JSON_HEDLEY_VERSION 15 + +#if defined(JSON_HEDLEY_STRINGIFY_EX) + #undef JSON_HEDLEY_STRINGIFY_EX +#endif +#define JSON_HEDLEY_STRINGIFY_EX(x) #x + +#if defined(JSON_HEDLEY_STRINGIFY) + #undef JSON_HEDLEY_STRINGIFY +#endif +#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x) + +#if defined(JSON_HEDLEY_CONCAT_EX) + #undef JSON_HEDLEY_CONCAT_EX +#endif +#define JSON_HEDLEY_CONCAT_EX(a,b) a##b + +#if defined(JSON_HEDLEY_CONCAT) + #undef JSON_HEDLEY_CONCAT +#endif +#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b) + +#if defined(JSON_HEDLEY_CONCAT3_EX) + #undef JSON_HEDLEY_CONCAT3_EX +#endif +#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c + +#if defined(JSON_HEDLEY_CONCAT3) + #undef JSON_HEDLEY_CONCAT3 +#endif +#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c) + +#if defined(JSON_HEDLEY_VERSION_ENCODE) + #undef 
JSON_HEDLEY_VERSION_ENCODE +#endif +#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision)) + +#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR) + #undef JSON_HEDLEY_VERSION_DECODE_MAJOR +#endif +#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000) + +#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR) + #undef JSON_HEDLEY_VERSION_DECODE_MINOR +#endif +#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000) + +#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION) + #undef JSON_HEDLEY_VERSION_DECODE_REVISION +#endif +#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000) + +#if defined(JSON_HEDLEY_GNUC_VERSION) + #undef JSON_HEDLEY_GNUC_VERSION +#endif +#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) + #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) +#elif defined(__GNUC__) + #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0) +#endif + +#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK) + #undef JSON_HEDLEY_GNUC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_GNUC_VERSION) + #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION) + #undef JSON_HEDLEY_MSVC_VERSION +#endif +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) +#elif defined(_MSC_FULL_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) +#elif defined(_MSC_VER) && !defined(__ICL) + #define JSON_HEDLEY_MSVC_VERSION 
JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) + #undef JSON_HEDLEY_MSVC_VERSION_CHECK +#endif +#if !defined(JSON_HEDLEY_MSVC_VERSION) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) +#elif defined(_MSC_VER) && (_MSC_VER >= 1400) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) +#elif defined(_MSC_VER) && (_MSC_VER >= 1200) + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) +#else + #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION) + #undef JSON_HEDLEY_INTEL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) +#elif defined(__INTEL_COMPILER) && !defined(__ICL) + #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_INTEL_VERSION) + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION) + #undef JSON_HEDLEY_INTEL_CL_VERSION +#endif +#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) + #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) +#endif + +#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) + #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK +#endif +#if 
defined(JSON_HEDLEY_INTEL_CL_VERSION) + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION) + #undef JSON_HEDLEY_PGI_VERSION +#endif +#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) + #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) +#endif + +#if defined(JSON_HEDLEY_PGI_VERSION_CHECK) + #undef JSON_HEDLEY_PGI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PGI_VERSION) + #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION) + #undef JSON_HEDLEY_SUNPRO_VERSION +#endif +#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) +#elif defined(__SUNPRO_C) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) +#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) +#elif defined(__SUNPRO_CC) + #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) +#endif + +#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) + #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK +#endif +#if 
defined(JSON_HEDLEY_SUNPRO_VERSION) + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION +#endif +#if defined(__EMSCRIPTEN__) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) +#endif + +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) + #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION) + #undef JSON_HEDLEY_ARM_VERSION +#endif +#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) +#elif defined(__CC_ARM) && defined(__ARMCC_VERSION) + #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) +#endif + +#if defined(JSON_HEDLEY_ARM_VERSION_CHECK) + #undef JSON_HEDLEY_ARM_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_ARM_VERSION) + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION) + #undef JSON_HEDLEY_IBM_VERSION +#endif +#if defined(__ibmxl__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, 
__ibmxl_modification__) +#elif defined(__xlC__) && defined(__xlC_ver__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) +#elif defined(__xlC__) + #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) +#endif + +#if defined(JSON_HEDLEY_IBM_VERSION_CHECK) + #undef JSON_HEDLEY_IBM_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_IBM_VERSION) + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_VERSION) + #undef JSON_HEDLEY_TI_VERSION +#endif +#if \ + defined(__TI_COMPILER_VERSION__) && \ + ( \ + defined(__TMS470__) || defined(__TI_ARM__) || \ + defined(__MSP430__) || \ + defined(__TMS320C2000__) \ + ) +#if (__TI_COMPILER_VERSION__ >= 16000000) + #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif +#endif + +#if defined(JSON_HEDLEY_TI_VERSION_CHECK) + #undef JSON_HEDLEY_TI_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_VERSION) + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #undef JSON_HEDLEY_TI_CL2000_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) + #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL2000_VERSION) + #define 
JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #undef JSON_HEDLEY_TI_CL430_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) + #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL430_VERSION) + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #undef JSON_HEDLEY_TI_ARMCL_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) + #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) + #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION) + #undef JSON_HEDLEY_TI_CL6X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) + #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 
1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL6X_VERSION) + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #undef JSON_HEDLEY_TI_CL7X_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) + #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CL7X_VERSION) + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #undef JSON_HEDLEY_TI_CLPRU_VERSION +#endif +#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) + #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) +#endif + +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) + #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION) + #undef JSON_HEDLEY_CRAY_VERSION +#endif +#if defined(_CRAYC) + #if defined(_RELEASE_PATCHLEVEL) + #define JSON_HEDLEY_CRAY_VERSION 
JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) + #else + #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) + #undef JSON_HEDLEY_CRAY_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_CRAY_VERSION) + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION) + #undef JSON_HEDLEY_IAR_VERSION +#endif +#if defined(__IAR_SYSTEMS_ICC__) + #if __VER__ > 1000 + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) + #else + #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) + #endif +#endif + +#if defined(JSON_HEDLEY_IAR_VERSION_CHECK) + #undef JSON_HEDLEY_IAR_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_IAR_VERSION) + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_TINYC_VERSION) + #undef JSON_HEDLEY_TINYC_VERSION +#endif +#if defined(__TINYC__) + #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) +#endif + +#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK) + #undef JSON_HEDLEY_TINYC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_TINYC_VERSION) + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_DMC_VERSION) + #undef JSON_HEDLEY_DMC_VERSION +#endif +#if defined(__DMC__) + #define 
JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) +#endif + +#if defined(JSON_HEDLEY_DMC_VERSION_CHECK) + #undef JSON_HEDLEY_DMC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_DMC_VERSION) + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #undef JSON_HEDLEY_COMPCERT_VERSION +#endif +#if defined(__COMPCERT_VERSION__) + #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) +#endif + +#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) + #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_COMPCERT_VERSION) + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION) + #undef JSON_HEDLEY_PELLES_VERSION +#endif +#if defined(__POCC__) + #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) +#endif + +#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) + #undef JSON_HEDLEY_PELLES_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_PELLES_VERSION) + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + #undef JSON_HEDLEY_MCST_LCC_VERSION +#endif +#if defined(__LCC__) && defined(__LCC_MINOR__) + #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) +#endif + +#if 
defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) + #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_MCST_LCC_VERSION) + #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION) + #undef JSON_HEDLEY_GCC_VERSION +#endif +#if \ + defined(JSON_HEDLEY_GNUC_VERSION) && \ + !defined(__clang__) && \ + !defined(JSON_HEDLEY_INTEL_VERSION) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_ARM_VERSION) && \ + !defined(JSON_HEDLEY_CRAY_VERSION) && \ + !defined(JSON_HEDLEY_TI_VERSION) && \ + !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ + !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ + !defined(__COMPCERT__) && \ + !defined(JSON_HEDLEY_MCST_LCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION +#endif + +#if defined(JSON_HEDLEY_GCC_VERSION_CHECK) + #undef JSON_HEDLEY_GCC_VERSION_CHECK +#endif +#if defined(JSON_HEDLEY_GCC_VERSION) + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) +#else + #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_ATTRIBUTE +#endif +#if \ + defined(__has_attribute) && \ + ( \ + (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ + ) +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) +#else +# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define 
JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE +#endif +#if defined(__has_attribute) + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE +#endif +#if \ + defined(__has_cpp_attribute) && \ + defined(__cplusplus) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) + #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS +#endif +#if !defined(__cplusplus) || !defined(__has_cpp_attribute) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#elif \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_IAR_VERSION) && \ + (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ + (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) +#else + #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define 
JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE +#endif +#if defined(__has_cpp_attribute) && defined(__cplusplus) + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_BUILTIN) + #undef JSON_HEDLEY_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) +#else + #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) + #undef JSON_HEDLEY_GNUC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) + #undef JSON_HEDLEY_GCC_HAS_BUILTIN +#endif +#if defined(__has_builtin) + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) +#else + #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_FEATURE) + #undef JSON_HEDLEY_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) +#else + #define JSON_HEDLEY_HAS_FEATURE(feature) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) + #undef JSON_HEDLEY_GNUC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) 
+#endif + +#if defined(JSON_HEDLEY_GCC_HAS_FEATURE) + #undef JSON_HEDLEY_GCC_HAS_FEATURE +#endif +#if defined(__has_feature) + #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) +#else + #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_EXTENSION) + #undef JSON_HEDLEY_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) +#else + #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) + #undef JSON_HEDLEY_GNUC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) + #undef JSON_HEDLEY_GCC_HAS_EXTENSION +#endif +#if defined(__has_extension) + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) +#else + #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) 
JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE +#endif +#if defined(__has_declspec_attribute) + #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) +#else + #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_HAS_WARNING) + #undef JSON_HEDLEY_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) +#else + #define JSON_HEDLEY_HAS_WARNING(warning) (0) +#endif + +#if defined(JSON_HEDLEY_GNUC_HAS_WARNING) + #undef JSON_HEDLEY_GNUC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_GCC_HAS_WARNING) + #undef JSON_HEDLEY_GCC_HAS_WARNING +#endif +#if defined(__has_warning) + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) +#else + #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + defined(__clang__) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + 
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ + (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) + #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_PRAGMA(value) __pragma(value) +#else + #define JSON_HEDLEY_PRAGMA(value) +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) + #undef JSON_HEDLEY_DIAGNOSTIC_PUSH +#endif +#if defined(JSON_HEDLEY_DIAGNOSTIC_POP) + #undef JSON_HEDLEY_DIAGNOSTIC_POP +#endif +#if defined(__clang__) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) + #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) +#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) + #define 
JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") + #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") +#else + #define JSON_HEDLEY_DIAGNOSTIC_PUSH + #define JSON_HEDLEY_DIAGNOSTIC_POP +#endif + +/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for + HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ +#endif +#if defined(__cplusplus) +# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat") +# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions") +# if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions") +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ + _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# endif +# else +# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ + xpr \ + JSON_HEDLEY_DIAGNOSTIC_POP +# endif +# endif +#endif +#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x +#endif + +#if defined(JSON_HEDLEY_CONST_CAST) + #undef JSON_HEDLEY_CONST_CAST +#endif +#if defined(__cplusplus) +# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr)) +#elif \ + JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \ + 
JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ + ((T) (expr)); \ + JSON_HEDLEY_DIAGNOSTIC_POP \ + })) +#else +# define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_REINTERPRET_CAST) + #undef JSON_HEDLEY_REINTERPRET_CAST +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr)) +#else + #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_STATIC_CAST) + #undef JSON_HEDLEY_STATIC_CAST +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr)) +#else + #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr)) +#endif + +#if defined(JSON_HEDLEY_CPP_CAST) + #undef JSON_HEDLEY_CPP_CAST +#endif +#if defined(__cplusplus) +# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast") +# define JSON_HEDLEY_CPP_CAST(T, expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \ + ((T) (expr)) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0) +# define JSON_HEDLEY_CPP_CAST(T, expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("diag_suppress=Pe137") \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr)) +# endif +#else +# define JSON_HEDLEY_CPP_CAST(T, expr) (expr) +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") +#elif 
JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#endif + +#if 
defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") +#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") +#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") +#elif \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#endif + +#if 
defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") +#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#endif + +#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) + #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunused-function") + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") +#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) +#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") +#else + #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#endif + +#if defined(JSON_HEDLEY_DEPRECATED) + #undef JSON_HEDLEY_DEPRECATED +#endif +#if defined(JSON_HEDLEY_DEPRECATED_FOR) + #undef JSON_HEDLEY_DEPRECATED_FOR +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) +#elif \ + (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ + 
JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) +#elif defined(__cplusplus) && (__cplusplus >= 201402L) + #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + 
JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") +#else + #define JSON_HEDLEY_DEPRECATED(since) + #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) +#endif + +#if defined(JSON_HEDLEY_UNAVAILABLE) + #undef JSON_HEDLEY_UNAVAILABLE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) +#else + #define JSON_HEDLEY_UNAVAILABLE(available_since) +#endif + +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT +#endif +#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) + #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) +#elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) + #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) +#elif defined(_Check_return_) /* SAL */ + #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ +#else + #define JSON_HEDLEY_WARN_UNUSED_RESULT + #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) +#endif + +#if defined(JSON_HEDLEY_SENTINEL) + #undef JSON_HEDLEY_SENTINEL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) +#else + #define JSON_HEDLEY_SENTINEL(position) +#endif + +#if defined(JSON_HEDLEY_NO_RETURN) + #undef 
JSON_HEDLEY_NO_RETURN +#endif +#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NO_RETURN __noreturn +#elif \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L + #define JSON_HEDLEY_NO_RETURN _Noreturn +#elif defined(__cplusplus) && (__cplusplus >= 201103L) + #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") +#elif 
JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) +#else + #define JSON_HEDLEY_NO_RETURN +#endif + +#if defined(JSON_HEDLEY_NO_ESCAPE) + #undef JSON_HEDLEY_NO_ESCAPE +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) + #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) +#else + #define JSON_HEDLEY_NO_ESCAPE +#endif + +#if defined(JSON_HEDLEY_UNREACHABLE) + #undef JSON_HEDLEY_UNREACHABLE +#endif +#if defined(JSON_HEDLEY_UNREACHABLE_RETURN) + #undef JSON_HEDLEY_UNREACHABLE_RETURN +#endif +#if defined(JSON_HEDLEY_ASSUME) + #undef JSON_HEDLEY_ASSUME +#endif +#if \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_ASSUME(expr) __assume(expr) +#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) + #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) +#elif \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #if defined(__cplusplus) + #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) + #else + #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) + #endif +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() +#elif defined(JSON_HEDLEY_ASSUME) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif +#if !defined(JSON_HEDLEY_ASSUME) + #if defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 
1 : (JSON_HEDLEY_UNREACHABLE(), 1))) + #else + #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) + #endif +#endif +#if defined(JSON_HEDLEY_UNREACHABLE) + #if \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) + #else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() + #endif +#else + #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) +#endif +#if !defined(JSON_HEDLEY_UNREACHABLE) + #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) +#endif + +JSON_HEDLEY_DIAGNOSTIC_PUSH +#if JSON_HEDLEY_HAS_WARNING("-Wpedantic") + #pragma clang diagnostic ignored "-Wpedantic" +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) + #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" +#endif +#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) + #if defined(__clang__) + #pragma clang diagnostic ignored "-Wvariadic-macros" + #elif defined(JSON_HEDLEY_GCC_VERSION) + #pragma GCC diagnostic ignored "-Wvariadic-macros" + #endif +#endif +#if defined(JSON_HEDLEY_NON_NULL) + #undef JSON_HEDLEY_NON_NULL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) +#else + #define JSON_HEDLEY_NON_NULL(...) 
+#endif +JSON_HEDLEY_DIAGNOSTIC_POP + +#if defined(JSON_HEDLEY_PRINTF_FORMAT) + #undef JSON_HEDLEY_PRINTF_FORMAT +#endif +#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) +#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) +#elif \ + JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) +#else + #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) +#endif + +#if defined(JSON_HEDLEY_CONSTEXPR) + 
#undef JSON_HEDLEY_CONSTEXPR +#endif +#if defined(__cplusplus) + #if __cplusplus >= 201103L + #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) + #endif +#endif +#if !defined(JSON_HEDLEY_CONSTEXPR) + #define JSON_HEDLEY_CONSTEXPR +#endif + +#if defined(JSON_HEDLEY_PREDICT) + #undef JSON_HEDLEY_PREDICT +#endif +#if defined(JSON_HEDLEY_LIKELY) + #undef JSON_HEDLEY_LIKELY +#endif +#if defined(JSON_HEDLEY_UNLIKELY) + #undef JSON_HEDLEY_UNLIKELY +#endif +#if defined(JSON_HEDLEY_UNPREDICTABLE) + #undef JSON_HEDLEY_UNPREDICTABLE +#endif +#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) + #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) +#endif +#if \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) +#elif \ + (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ + 
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ + (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ + })) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ + (__extension__ ({ \ + double hedley_probability_ = (probability); \ + ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ + })) +# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) +# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) +#else +# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) +# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) +# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) +# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) +#endif +#if !defined(JSON_HEDLEY_UNPREDICTABLE) + #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) +#endif + +#if defined(JSON_HEDLEY_MALLOC) + #undef JSON_HEDLEY_MALLOC +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + 
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_MALLOC __declspec(restrict) +#else + #define JSON_HEDLEY_MALLOC +#endif + +#if defined(JSON_HEDLEY_PURE) + #undef JSON_HEDLEY_PURE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PURE __attribute__((__pure__)) +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) +# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ + ) +# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") +#else +# define JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_CONST) + #undef JSON_HEDLEY_CONST +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_CONST __attribute__((__const__)) +#elif \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) + #define 
JSON_HEDLEY_CONST _Pragma("no_side_effect") +#else + #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE +#endif + +#if defined(JSON_HEDLEY_RESTRICT) + #undef JSON_HEDLEY_RESTRICT +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT restrict +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ + defined(__clang__) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_RESTRICT __restrict +#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) + #define JSON_HEDLEY_RESTRICT _Restrict +#else + #define JSON_HEDLEY_RESTRICT +#endif + +#if defined(JSON_HEDLEY_INLINE) + #undef JSON_HEDLEY_INLINE +#endif +#if \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ + (defined(__cplusplus) && (__cplusplus >= 199711L)) + #define JSON_HEDLEY_INLINE inline +#elif \ + defined(JSON_HEDLEY_GCC_VERSION) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) + #define JSON_HEDLEY_INLINE __inline__ +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + 
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_INLINE __inline +#else + #define JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_ALWAYS_INLINE) + #undef JSON_HEDLEY_ALWAYS_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) +# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_ALWAYS_INLINE __forceinline +#elif defined(__cplusplus) && \ + ( \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ + ) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") +#elif 
JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") +#else +# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE +#endif + +#if defined(JSON_HEDLEY_NEVER_INLINE) + #undef JSON_HEDLEY_NEVER_INLINE +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ + JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ + (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ + (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ + (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ + JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ + JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ + JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") +#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) + #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") +#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) + #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) 
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) + #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) +#else + #define JSON_HEDLEY_NEVER_INLINE +#endif + +#if defined(JSON_HEDLEY_PRIVATE) + #undef JSON_HEDLEY_PRIVATE +#endif +#if defined(JSON_HEDLEY_PUBLIC) + #undef JSON_HEDLEY_PUBLIC +#endif +#if defined(JSON_HEDLEY_IMPORT) + #undef JSON_HEDLEY_IMPORT +#endif +#if defined(_WIN32) || defined(__CYGWIN__) +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC __declspec(dllexport) +# define JSON_HEDLEY_IMPORT __declspec(dllimport) +#else +# if \ + JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + ( \ + defined(__TI_EABI__) && \ + ( \ + (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ + ) \ + ) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) +# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) +# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) +# else +# define JSON_HEDLEY_PRIVATE +# define JSON_HEDLEY_PUBLIC +# endif +# define JSON_HEDLEY_IMPORT extern +#endif + +#if defined(JSON_HEDLEY_NO_THROW) + #undef JSON_HEDLEY_NO_THROW +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) + #define JSON_HEDLEY_NO_THROW __declspec(nothrow) +#else + #define JSON_HEDLEY_NO_THROW +#endif + +#if defined(JSON_HEDLEY_FALL_THROUGH) + #undef JSON_HEDLEY_FALL_THROUGH +#endif +#if \ + 
JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__)) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough) + #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]]) +#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough) + #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]]) +#elif defined(__fallthrough) /* SAL */ + #define JSON_HEDLEY_FALL_THROUGH __fallthrough +#else + #define JSON_HEDLEY_FALL_THROUGH +#endif + +#if defined(JSON_HEDLEY_RETURNS_NON_NULL) + #undef JSON_HEDLEY_RETURNS_NON_NULL +#endif +#if \ + JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__)) +#elif defined(_Ret_notnull_) /* SAL */ + #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_ +#else + #define JSON_HEDLEY_RETURNS_NON_NULL +#endif + +#if defined(JSON_HEDLEY_ARRAY_PARAM) + #undef JSON_HEDLEY_ARRAY_PARAM +#endif +#if \ + defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \ + !defined(__STDC_NO_VLA__) && \ + !defined(__cplusplus) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_TINYC_VERSION) + #define JSON_HEDLEY_ARRAY_PARAM(name) (name) +#else + #define JSON_HEDLEY_ARRAY_PARAM(name) +#endif + +#if defined(JSON_HEDLEY_IS_CONSTANT) + #undef JSON_HEDLEY_IS_CONSTANT +#endif +#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR) + #undef JSON_HEDLEY_REQUIRE_CONSTEXPR +#endif +/* JSON_HEDLEY_IS_CONSTEXPR_ is for + HEDLEY INTERNAL USE ONLY. API subject to change without notice. 
*/ +#if defined(JSON_HEDLEY_IS_CONSTEXPR_) + #undef JSON_HEDLEY_IS_CONSTEXPR_ +#endif +#if \ + JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ + (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) + #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr) +#endif +#if !defined(__cplusplus) +# if \ + JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ + JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ + JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24) +#if defined(__INTPTR_TYPE__) + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*) +#else + #include <stdint.h> + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*) +#endif +# elif \ + ( \ + defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ + !defined(JSON_HEDLEY_SUNPRO_VERSION) && \ + !defined(JSON_HEDLEY_PGI_VERSION) && \ + !defined(JSON_HEDLEY_IAR_VERSION)) || \ + (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ + JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ + JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0) +#if defined(__INTPTR_TYPE__) + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? 
(void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) +#else + #include <stdint.h> + #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) +#endif +# elif \ + defined(JSON_HEDLEY_GCC_VERSION) || \ + defined(JSON_HEDLEY_INTEL_VERSION) || \ + defined(JSON_HEDLEY_TINYC_VERSION) || \ + defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \ + JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \ + defined(JSON_HEDLEY_TI_CL2000_VERSION) || \ + defined(JSON_HEDLEY_TI_CL6X_VERSION) || \ + defined(JSON_HEDLEY_TI_CL7X_VERSION) || \ + defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \ + defined(__clang__) +# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \ + sizeof(void) != \ + sizeof(*( \ + 1 ? \ + ((void*) ((expr) * 0L) ) : \ +((struct { char v[sizeof(void) * 2]; } *) 1) \ + ) \ + ) \ + ) +# endif +#endif +#if defined(JSON_HEDLEY_IS_CONSTEXPR_) + #if !defined(JSON_HEDLEY_IS_CONSTANT) + #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr) + #endif + #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? 
(expr) : (-1)) +#else + #if !defined(JSON_HEDLEY_IS_CONSTANT) + #define JSON_HEDLEY_IS_CONSTANT(expr) (0) + #endif + #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr) +#endif + +#if defined(JSON_HEDLEY_BEGIN_C_DECLS) + #undef JSON_HEDLEY_BEGIN_C_DECLS +#endif +#if defined(JSON_HEDLEY_END_C_DECLS) + #undef JSON_HEDLEY_END_C_DECLS +#endif +#if defined(JSON_HEDLEY_C_DECL) + #undef JSON_HEDLEY_C_DECL +#endif +#if defined(__cplusplus) + #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" { + #define JSON_HEDLEY_END_C_DECLS } + #define JSON_HEDLEY_C_DECL extern "C" +#else + #define JSON_HEDLEY_BEGIN_C_DECLS + #define JSON_HEDLEY_END_C_DECLS + #define JSON_HEDLEY_C_DECL +#endif + +#if defined(JSON_HEDLEY_STATIC_ASSERT) + #undef JSON_HEDLEY_STATIC_ASSERT +#endif +#if \ + !defined(__cplusplus) && ( \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ + (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ + JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ + defined(_Static_assert) \ + ) +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message) +#elif \ + (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ + JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) +#else +# define JSON_HEDLEY_STATIC_ASSERT(expr, message) +#endif + +#if defined(JSON_HEDLEY_NULL) + #undef JSON_HEDLEY_NULL +#endif +#if defined(__cplusplus) + #if __cplusplus >= 201103L + #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr) + #elif defined(NULL) + #define JSON_HEDLEY_NULL NULL + #else + #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0) + #endif +#elif defined(NULL) + #define JSON_HEDLEY_NULL NULL +#else + #define JSON_HEDLEY_NULL ((void*) 0) +#endif + +#if 
defined(JSON_HEDLEY_MESSAGE) + #undef JSON_HEDLEY_MESSAGE +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_MESSAGE(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(message msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) +#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) +#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) +# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_WARNING) + #undef JSON_HEDLEY_WARNING +#endif +#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") +# define JSON_HEDLEY_WARNING(msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ + JSON_HEDLEY_PRAGMA(clang warning msg) \ + JSON_HEDLEY_DIAGNOSTIC_POP +#elif \ + JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ + JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ + JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) +#elif \ + JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) +#else +# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) +#endif + +#if defined(JSON_HEDLEY_REQUIRE) + #undef JSON_HEDLEY_REQUIRE +#endif +#if defined(JSON_HEDLEY_REQUIRE_MSG) + #undef JSON_HEDLEY_REQUIRE_MSG +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) +# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") +# define JSON_HEDLEY_REQUIRE(expr) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + 
__attribute__((diagnose_if(!(expr), #expr, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ + __attribute__((diagnose_if(!(expr), msg, "error"))) \ + JSON_HEDLEY_DIAGNOSTIC_POP +# else +# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) +# endif +#else +# define JSON_HEDLEY_REQUIRE(expr) +# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) +#endif + +#if defined(JSON_HEDLEY_FLAGS) + #undef JSON_HEDLEY_FLAGS +#endif +#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) + #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) +#else + #define JSON_HEDLEY_FLAGS +#endif + +#if defined(JSON_HEDLEY_FLAGS_CAST) + #undef JSON_HEDLEY_FLAGS_CAST +#endif +#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) +# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ + JSON_HEDLEY_DIAGNOSTIC_PUSH \ + _Pragma("warning(disable:188)") \ + ((T) (expr)); \ + JSON_HEDLEY_DIAGNOSTIC_POP \ + })) +#else +# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) +#endif + +#if defined(JSON_HEDLEY_EMPTY_BASES) + #undef JSON_HEDLEY_EMPTY_BASES +#endif +#if \ + (JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \ + JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) + #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases) +#else + #define JSON_HEDLEY_EMPTY_BASES +#endif + +/* Remaining macros are deprecated. 
*/ + +#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK) + #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK +#endif +#if defined(__clang__) + #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0) +#else + #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) +#endif + +#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN) + #undef JSON_HEDLEY_CLANG_HAS_BUILTIN +#endif +#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin) + +#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE) + #undef JSON_HEDLEY_CLANG_HAS_FEATURE +#endif +#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature) + +#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION) + #undef JSON_HEDLEY_CLANG_HAS_EXTENSION +#endif +#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension) + +#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE) + #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE +#endif +#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) + +#if defined(JSON_HEDLEY_CLANG_HAS_WARNING) + #undef JSON_HEDLEY_CLANG_HAS_WARNING +#endif +#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning) + +#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */ + + +// This file contains all internal macro definitions +// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them + +// exclude unsupported compilers +#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) + #if 
defined(__clang__) + #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 + #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 + #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" + #endif + #endif +#endif + +// C++ language standard detection +#if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) + #define JSON_HAS_CPP_20 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 +#elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 +#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #define JSON_HAS_CPP_14 +#endif + +// disable float-equal warnings on GCC/clang +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wfloat-equal" +#endif + +// disable documentation warnings on clang +#if defined(__clang__) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wdocumentation" +#endif + +// allow to disable exceptions +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) + #define JSON_THROW(exception) throw exception + #define JSON_TRY try + #define JSON_CATCH(exception) catch(exception) + #define JSON_INTERNAL_CATCH(exception) catch(exception) +#else + #include + #define JSON_THROW(exception) std::abort() + #define JSON_TRY if(true) + #define JSON_CATCH(exception) if(false) + #define JSON_INTERNAL_CATCH(exception) if(false) +#endif + +// override exception macros +#if defined(JSON_THROW_USER) + #undef JSON_THROW + #define JSON_THROW 
JSON_THROW_USER +#endif +#if defined(JSON_TRY_USER) + #undef JSON_TRY + #define JSON_TRY JSON_TRY_USER +#endif +#if defined(JSON_CATCH_USER) + #undef JSON_CATCH + #define JSON_CATCH JSON_CATCH_USER + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_CATCH_USER +#endif +#if defined(JSON_INTERNAL_CATCH_USER) + #undef JSON_INTERNAL_CATCH + #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER +#endif + +// allow to override assert +#if !defined(JSON_ASSERT) + #include // assert + #define JSON_ASSERT(x) assert(x) +#endif + +// allow to access some private functions (needed by the test suite) +#if defined(JSON_TESTS_PRIVATE) + #define JSON_PRIVATE_UNLESS_TESTED public +#else + #define JSON_PRIVATE_UNLESS_TESTED private +#endif + +/*! +@brief macro to briefly define a mapping between an enum and JSON +@def NLOHMANN_JSON_SERIALIZE_ENUM +@since version 3.4.0 +*/ +#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \ + template \ + inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [e](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.first == e; \ + }); \ + j = ((it != std::end(m)) ? it : std::begin(m))->second; \ + } \ + template \ + inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ + { \ + static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ + static const std::pair m[] = __VA_ARGS__; \ + auto it = std::find_if(std::begin(m), std::end(m), \ + [&j](const std::pair& ej_pair) -> bool \ + { \ + return ej_pair.second == j; \ + }); \ + e = ((it != std::end(m)) ? it : std::begin(m))->first; \ + } + +// Ugly macros to avoid uglier copy-paste when specializing basic_json. They +// may be removed in the future once the class is split. 
+ +#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ + template class ObjectType, \ + template class ArrayType, \ + class StringType, class BooleanType, class NumberIntegerType, \ + class NumberUnsignedType, class NumberFloatType, \ + template class AllocatorType, \ + template class JSONSerializer, \ + class BinaryType> + +#define NLOHMANN_BASIC_JSON_TPL \ + basic_json + +// Macros to simplify conversion from/to types + +#define NLOHMANN_JSON_EXPAND( x ) x +#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME +#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ + NLOHMANN_JSON_PASTE64, \ + NLOHMANN_JSON_PASTE63, \ + NLOHMANN_JSON_PASTE62, \ + NLOHMANN_JSON_PASTE61, \ + NLOHMANN_JSON_PASTE60, \ + NLOHMANN_JSON_PASTE59, \ + NLOHMANN_JSON_PASTE58, \ + NLOHMANN_JSON_PASTE57, \ + NLOHMANN_JSON_PASTE56, \ + NLOHMANN_JSON_PASTE55, \ + NLOHMANN_JSON_PASTE54, \ + NLOHMANN_JSON_PASTE53, \ + NLOHMANN_JSON_PASTE52, \ + NLOHMANN_JSON_PASTE51, \ + NLOHMANN_JSON_PASTE50, \ + NLOHMANN_JSON_PASTE49, \ + NLOHMANN_JSON_PASTE48, \ + NLOHMANN_JSON_PASTE47, \ + NLOHMANN_JSON_PASTE46, \ + NLOHMANN_JSON_PASTE45, \ + NLOHMANN_JSON_PASTE44, \ + NLOHMANN_JSON_PASTE43, \ + NLOHMANN_JSON_PASTE42, \ + NLOHMANN_JSON_PASTE41, \ + NLOHMANN_JSON_PASTE40, \ + NLOHMANN_JSON_PASTE39, \ + NLOHMANN_JSON_PASTE38, \ + NLOHMANN_JSON_PASTE37, \ + NLOHMANN_JSON_PASTE36, \ + NLOHMANN_JSON_PASTE35, \ + NLOHMANN_JSON_PASTE34, \ + NLOHMANN_JSON_PASTE33, \ + NLOHMANN_JSON_PASTE32, \ + NLOHMANN_JSON_PASTE31, \ + NLOHMANN_JSON_PASTE30, \ + NLOHMANN_JSON_PASTE29, \ + NLOHMANN_JSON_PASTE28, \ + NLOHMANN_JSON_PASTE27, \ + NLOHMANN_JSON_PASTE26, \ + NLOHMANN_JSON_PASTE25, \ + 
NLOHMANN_JSON_PASTE24, \ + NLOHMANN_JSON_PASTE23, \ + NLOHMANN_JSON_PASTE22, \ + NLOHMANN_JSON_PASTE21, \ + NLOHMANN_JSON_PASTE20, \ + NLOHMANN_JSON_PASTE19, \ + NLOHMANN_JSON_PASTE18, \ + NLOHMANN_JSON_PASTE17, \ + NLOHMANN_JSON_PASTE16, \ + NLOHMANN_JSON_PASTE15, \ + NLOHMANN_JSON_PASTE14, \ + NLOHMANN_JSON_PASTE13, \ + NLOHMANN_JSON_PASTE12, \ + NLOHMANN_JSON_PASTE11, \ + NLOHMANN_JSON_PASTE10, \ + NLOHMANN_JSON_PASTE9, \ + NLOHMANN_JSON_PASTE8, \ + NLOHMANN_JSON_PASTE7, \ + NLOHMANN_JSON_PASTE6, \ + NLOHMANN_JSON_PASTE5, \ + NLOHMANN_JSON_PASTE4, \ + NLOHMANN_JSON_PASTE3, \ + NLOHMANN_JSON_PASTE2, \ + NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) +#define NLOHMANN_JSON_PASTE2(func, v1) func(v1) +#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) +#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) +#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) +#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) +#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) +#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) +#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) +#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) +#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) +#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, 
v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) +#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) +#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) +#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) +#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) +#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) +#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) +#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) +#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) +#define NLOHMANN_JSON_PASTE21(func, 
v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) +#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) +#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) +#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) +#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) +#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) +#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, 
v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) +#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) +#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) +#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) +#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) +#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) +#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, 
v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) +#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) +#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) +#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) +#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) +#define 
NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) +#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) +#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) +#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) +#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, 
v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) +#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) +#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) +#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) +#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, 
v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) +#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) +#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) +#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) +#define 
NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) +#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) +#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) +#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, 
v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) +#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) +#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) +#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, 
v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) +#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) +#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) +#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, 
v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) +#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) +#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) +#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, 
v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) +#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) +#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) + +#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; +#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) 
\ + friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE +@since version 3.9.0 +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ + inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +#ifndef JSON_USE_IMPLICIT_CONVERSIONS + #define JSON_USE_IMPLICIT_CONVERSIONS 1 +#endif + +#if JSON_USE_IMPLICIT_CONVERSIONS + #define JSON_EXPLICIT +#else + #define JSON_EXPLICIT explicit +#endif + + +namespace nlohmann +{ +namespace detail +{ + +/*! +@brief replace all occurrences of a substring by another string + +@param[in,out] s the string to manipulate; changed so that all + occurrences of @a f are replaced with @a t +@param[in] f the substring to replace with @a t +@param[in] t the string to replace @a f + +@pre The search string @a f must not be empty. **This precondition is +enforced with an assertion.** + +@since version 2.0.0 +*/ +inline void replace_substring(std::string& s, const std::string& f, + const std::string& t) +{ + JSON_ASSERT(!f.empty()); + for (auto pos = s.find(f); // find first occurrence of f + pos != std::string::npos; // make sure f was found + s.replace(pos, f.size(), t), // replace with t, and + pos = s.find(f, pos + t.size())) // find next occurrence of f + {} +} + +/*! + * @brief string escaping as described in RFC 6901 (Sect. 
4) + * @param[in] s string to escape + * @return escaped string + * + * Note the order of escaping "~" to "~0" and "/" to "~1" is important. + */ +inline std::string escape(std::string s) +{ + replace_substring(s, "~", "~0"); + replace_substring(s, "/", "~1"); + return s; +} + +/*! + * @brief string unescaping as described in RFC 6901 (Sect. 4) + * @param[in] s string to unescape + * @return unescaped string + * + * Note the order of escaping "~1" to "/" and "~0" to "~" is important. + */ +static void unescape(std::string& s) +{ + replace_substring(s, "~1", "/"); + replace_substring(s, "~0", "~"); +} + +} // namespace detail +} // namespace nlohmann + +// #include + + +#include // size_t + +namespace nlohmann +{ +namespace detail +{ +/// struct to capture the start position of the current token +struct position_t +{ + /// the total number of characters read + std::size_t chars_read_total = 0; + /// the number of characters read in the current line + std::size_t chars_read_current_line = 0; + /// the number of lines read + std::size_t lines_read = 0; + + /// conversion to size_t to preserve SAX interface + constexpr operator size_t() const + { + return chars_read_total; + } +}; + +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ +//////////////// +// exceptions // +//////////////// + +/*! +@brief general exception of the @ref basic_json class + +This class is an extension of `std::exception` objects with a member @a id for +exception ids. It is used as the base class for all exceptions thrown by the +@ref basic_json class. This class can hence be used as "wildcard" to catch +exceptions. 
+ +Subclasses: +- @ref parse_error for exceptions indicating a parse error +- @ref invalid_iterator for exceptions indicating errors with iterators +- @ref type_error for exceptions indicating executing a member function with + a wrong type +- @ref out_of_range for exceptions indicating access out of the defined range +- @ref other_error for exceptions indicating other library errors + +@internal +@note To have nothrow-copy-constructible exceptions, we internally use + `std::runtime_error` which can cope with arbitrary-length error messages. + Intermediate strings are built with static functions and then passed to + the actual constructor. +@endinternal + +@liveexample{The following code shows how arbitrary library exceptions can be +caught.,exception} + +@since version 3.0.0 +*/ +class exception : public std::exception +{ + public: + /// returns the explanatory string + JSON_HEDLEY_RETURNS_NON_NULL + const char* what() const noexcept override + { + return m.what(); + } + + /// the id of the exception + const int id; + + protected: + JSON_HEDLEY_NON_NULL(3) + exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} + + static std::string name(const std::string& ename, int id_) + { + return "[json.exception." + ename + "." 
+ std::to_string(id_) + "] "; + } + + template + static std::string diagnostics(const BasicJsonType& leaf_element) + { +#if JSON_DIAGNOSTICS + std::vector tokens; + for (const auto* current = &leaf_element; current->m_parent != nullptr; current = current->m_parent) + { + switch (current->m_parent->type()) + { + case value_t::array: + { + for (std::size_t i = 0; i < current->m_parent->m_value.array->size(); ++i) + { + if (¤t->m_parent->m_value.array->operator[](i) == current) + { + tokens.emplace_back(std::to_string(i)); + break; + } + } + break; + } + + case value_t::object: + { + for (const auto& element : *current->m_parent->m_value.object) + { + if (&element.second == current) + { + tokens.emplace_back(element.first.c_str()); + break; + } + } + break; + } + + default: // LCOV_EXCL_LINE + break; // LCOV_EXCL_LINE + } + } + + if (tokens.empty()) + { + return ""; + } + + return "(" + std::accumulate(tokens.rbegin(), tokens.rend(), std::string{}, + [](const std::string & a, const std::string & b) + { + return a + "/" + detail::escape(b); + }) + ") "; +#else + return ""; +#endif + } + + private: + /// an exception object as storage for error messages + std::runtime_error m; +}; + +/*! +@brief exception indicating a parse error + +This exception is thrown by the library when a parse error occurs. Parse errors +can occur during the deserialization of JSON text, CBOR, MessagePack, as well +as when using JSON Patch. + +Member @a byte holds the byte index of the last read character in the input +file. + +Exceptions have ids 1xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. 
+json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. +json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. +json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. +json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. +json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. +json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. +json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. +json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. +json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read.
+json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. +json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. +json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet). +json.exception.parse_error.115 | parse error at byte 5: syntax error while parsing UBJSON high-precision number: invalid number text: 1A | A UBJSON high-precision number could not be parsed. + +@note For an input with n bytes, 1 is the index of the first character and n+1 + is the index of the terminating null byte or the end of file. This also + holds true when reading a byte vector (CBOR or MessagePack). + +@liveexample{The following code shows how a `parse_error` exception can be +caught.,parse_error} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class parse_error : public exception +{ + public: + /*! 
+ @brief create a parse error exception + @param[in] id_ the id of the exception + @param[in] pos the position where the error occurred (or with + chars_read_total=0 if the position cannot be + determined) + @param[in] what_arg the explanatory string + @return parse_error object + */ + template + static parse_error create(int id_, const position_t& pos, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("parse_error", id_) + "parse error" + + position_string(pos) + ": " + exception::diagnostics(context) + what_arg; + return parse_error(id_, pos.chars_read_total, w.c_str()); + } + + template + static parse_error create(int id_, std::size_t byte_, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("parse_error", id_) + "parse error" + + (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") + + ": " + exception::diagnostics(context) + what_arg; + return parse_error(id_, byte_, w.c_str()); + } + + /*! + @brief byte index of the parse error + + The byte index of the last read character in the input file. + + @note For an input with n bytes, 1 is the index of the first character and + n+1 is the index of the terminating null byte or the end of file. + This also holds true when reading a byte vector (CBOR or MessagePack). + */ + const std::size_t byte; + + private: + parse_error(int id_, std::size_t byte_, const char* what_arg) + : exception(id_, what_arg), byte(byte_) {} + + static std::string position_string(const position_t& pos) + { + return " at line " + std::to_string(pos.lines_read + 1) + + ", column " + std::to_string(pos.chars_read_current_line); + } +}; + +/*! +@brief exception indicating errors with iterators + +This exception is thrown if iterators passed to a library function do not match +the expected semantics. + +Exceptions have ids 2xx. 
+ +name / id | example message | description +----------------------------------- | --------------- | ------------------------- +json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. +json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. +json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. +json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. +json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. 
+json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. +json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. +json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. +json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. +json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. +json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. +json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). 
+ +@liveexample{The following code shows how an `invalid_iterator` exception can be +caught.,invalid_iterator} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class invalid_iterator : public exception +{ + public: + template + static invalid_iterator create(int id_, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("invalid_iterator", id_) + exception::diagnostics(context) + what_arg; + return invalid_iterator(id_, w.c_str()); + } + + private: + JSON_HEDLEY_NON_NULL(3) + invalid_iterator(int id_, const char* what_arg) + : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating executing a member function with a wrong type + +This exception is thrown in case of a type error; that is, a library function is +executed on a JSON value whose type does not match the expected semantics. + +Exceptions have ids 3xx. + +name / id | example message | description +----------------------------- | --------------- | ------------------------- +json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. +json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. 
+json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &. +json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. +json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. +json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. +json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. +json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. +json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. +json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. +json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. +json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. +json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. 
+json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. +json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. +json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | +json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) | + +@liveexample{The following code shows how a `type_error` exception can be +caught.,type_error} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref out_of_range for exceptions indicating access out of the defined range +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class type_error : public exception +{ + public: + template + static type_error create(int id_, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("type_error", id_) + exception::diagnostics(context) + what_arg; + return type_error(id_, w.c_str()); + } + + private: + JSON_HEDLEY_NON_NULL(3) + type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating access out of the defined range + +This exception is thrown in case a library function is called on an input +parameter that exceeds the expected range, for instance in case of array +indices or nonexisting object keys. + +Exceptions have ids 4xx. 
+ +name / id | example message | description +------------------------------- | --------------- | ------------------------- +json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. +json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. +json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. +json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. +json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. +json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored without changing it to NaN or INF. +json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. (until version 3.8.0) | +json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity.
| +json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string | + +@liveexample{The following code shows how an `out_of_range` exception can be +caught.,out_of_range} + +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref other_error for exceptions indicating other library errors + +@since version 3.0.0 +*/ +class out_of_range : public exception +{ + public: + template + static out_of_range create(int id_, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("out_of_range", id_) + exception::diagnostics(context) + what_arg; + return out_of_range(id_, w.c_str()); + } + + private: + JSON_HEDLEY_NON_NULL(3) + out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; + +/*! +@brief exception indicating other library errors + +This exception is thrown in case of errors that cannot be classified with the +other exception types. + +Exceptions have ids 5xx. + +name / id | example message | description +------------------------------ | --------------- | ------------------------- +json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. 
+ +@sa - @ref exception for the base class of the library exceptions +@sa - @ref parse_error for exceptions indicating a parse error +@sa - @ref invalid_iterator for exceptions indicating errors with iterators +@sa - @ref type_error for exceptions indicating executing a member function with + a wrong type +@sa - @ref out_of_range for exceptions indicating access out of the defined range + +@liveexample{The following code shows how an `other_error` exception can be +caught.,other_error} + +@since version 3.0.0 +*/ +class other_error : public exception +{ + public: + template + static other_error create(int id_, const std::string& what_arg, const BasicJsonType& context) + { + std::string w = exception::name("other_error", id_) + exception::diagnostics(context) + what_arg; + return other_error(id_, w.c_str()); + } + + private: + JSON_HEDLEY_NON_NULL(3) + other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // size_t +#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type +#include // index_sequence, make_index_sequence, index_sequence_for + +// #include + + +namespace nlohmann +{ +namespace detail +{ + +template +using uncvref_t = typename std::remove_cv::type>::type; + +#ifdef JSON_HAS_CPP_14 + +// the following utilities are natively available in C++14 +using std::enable_if_t; +using std::index_sequence; +using std::make_index_sequence; +using std::index_sequence_for; + +#else + +// alias templates to reduce boilerplate +template +using enable_if_t = typename std::enable_if::type; + +// source: https://stackoverflow.com/a/32223343 +template +struct index_sequence +{ + using type = index_sequence; + using value_type = std::size_t; + static constexpr std::size_t size() noexcept + { + return sizeof...(Ints); + } +}; + +template +struct merge_and_renumber; + 
+template +struct merge_and_renumber, index_sequence> + : index_sequence < I1..., (sizeof...(I1) + I2)... > {}; + +template +struct make_index_sequence + : merge_and_renumber < typename make_index_sequence < N / 2 >::type, + typename make_index_sequence < N - N / 2 >::type > {}; + +template<> struct make_index_sequence<0> : index_sequence<> {}; +template<> struct make_index_sequence<1> : index_sequence<0> {}; + +template +using index_sequence_for = make_index_sequence; + +#endif + +// dispatch utility (taken from ranges-v3) +template struct priority_tag : priority_tag < N - 1 > {}; +template<> struct priority_tag<0> {}; + +// taken from ranges-v3 +template +struct static_const +{ + static constexpr T value{}; +}; + +template +constexpr T static_const::value; + +} // namespace detail +} // namespace nlohmann + +// #include + + +#include // numeric_limits +#include // false_type, is_constructible, is_integral, is_same, true_type +#include // declval +#include // tuple + +// #include + + +#include // random_access_iterator_tag + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template struct make_void +{ + using type = void; +}; +template using void_t = typename make_void::type; +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +struct iterator_types {}; + +template +struct iterator_types < + It, + void_t> +{ + using difference_type = typename It::difference_type; + using value_type = typename It::value_type; + using pointer = typename It::pointer; + using reference = typename It::reference; + using iterator_category = typename It::iterator_category; +}; + +// This is required as some compilers implement std::iterator_traits in a way that +// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. 
+template +struct iterator_traits +{ +}; + +template +struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> + : iterator_types +{ +}; + +template +struct iterator_traits::value>> +{ + using iterator_category = std::random_access_iterator_tag; + using value_type = T; + using difference_type = ptrdiff_t; + using pointer = T*; + using reference = T&; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + + +#include + +// #include + + +// https://en.cppreference.com/w/cpp/experimental/is_detected +namespace nlohmann +{ +namespace detail +{ +struct nonesuch +{ + nonesuch() = delete; + ~nonesuch() = delete; + nonesuch(nonesuch const&) = delete; + nonesuch(nonesuch const&&) = delete; + void operator=(nonesuch const&) = delete; + void operator=(nonesuch&&) = delete; +}; + +template class Op, + class... Args> +struct detector +{ + using value_t = std::false_type; + using type = Default; +}; + +template class Op, class... Args> +struct detector>, Op, Args...> +{ + using value_t = std::true_type; + using type = Op; +}; + +template class Op, class... Args> +using is_detected = typename detector::value_t; + +template class Op, class... Args> +using detected_t = typename detector::type; + +template class Op, class... Args> +using detected_or = detector; + +template class Op, class... Args> +using detected_or_t = typename detected_or::type; + +template class Op, class... Args> +using is_detected_exact = std::is_same>; + +template class Op, class... Args> +using is_detected_convertible = + std::is_convertible, To>; +} // namespace detail +} // namespace nlohmann + +// #include +#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ +#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ +/*! 
+@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. +*/ +template +struct adl_serializer; + +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer, + class BinaryType = std::vector> +class basic_json; + +/*! +@brief JSON Pointer + +A JSON pointer defines a string syntax for identifying a specific value +within a JSON document. It can be used with functions `at` and +`operator[]`. Furthermore, JSON pointers are the base for JSON patches. + +@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) + +@since version 2.0.0 +*/ +template +class json_pointer; + +/*! +@brief default JSON class + +This type is the default specialization of the @ref basic_json class which +uses the standard template types. + +@since version 1.0.0 +*/ +using json = basic_json<>; + +template +struct ordered_map; + +/*! +@brief ordered JSON class + +This type preserves the insertion order of object keys. + +@since version 3.9.0 +*/ +using ordered_json = basic_json; + +} // namespace nlohmann + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ + + +namespace nlohmann +{ +/*! +@brief detail namespace with internal helper functions + +This namespace collects functions that should not be exposed, +implementations of some @ref basic_json methods, and meta-programming helpers. + +@since version 2.1.0 +*/ +namespace detail +{ +///////////// +// helpers // +///////////// + +// Note to maintainers: +// +// Every trait in this file expects a non CV-qualified type. +// The only exceptions are in the 'aliases for detected' section +// (i.e. 
those of the form: decltype(T::member_function(std::declval()))) +// +// In this case, T has to be properly CV-qualified to constraint the function arguments +// (e.g. to_json(BasicJsonType&, const T&)) + +template struct is_basic_json : std::false_type {}; + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +struct is_basic_json : std::true_type {}; + +////////////////////// +// json_ref helpers // +////////////////////// + +template +class json_ref; + +template +struct is_json_ref : std::false_type {}; + +template +struct is_json_ref> : std::true_type {}; + +////////////////////////// +// aliases for detected // +////////////////////////// + +template +using mapped_type_t = typename T::mapped_type; + +template +using key_type_t = typename T::key_type; + +template +using value_type_t = typename T::value_type; + +template +using difference_type_t = typename T::difference_type; + +template +using pointer_t = typename T::pointer; + +template +using reference_t = typename T::reference; + +template +using iterator_category_t = typename T::iterator_category; + +template +using iterator_t = typename T::iterator; + +template +using to_json_function = decltype(T::to_json(std::declval()...)); + +template +using from_json_function = decltype(T::from_json(std::declval()...)); + +template +using get_template_function = decltype(std::declval().template get()); + +// trait checking if JSONSerializer::from_json(json const&, udt&) exists +template +struct has_from_json : std::false_type {}; + +// trait checking if j.get is valid +// use this trait instead of std::is_constructible or std::is_convertible, +// both rely on, or make use of implicit conversions, and thus fail when T +// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958) +template +struct is_getable +{ + static constexpr bool value = is_detected::value; +}; + +template +struct has_from_json < BasicJsonType, T, + enable_if_t < !is_basic_json::value >> +{ + using serializer = typename 
BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + +// This trait checks if JSONSerializer::from_json(json const&) exists +// this overload is used for non-default-constructible user-defined-types +template +struct has_non_default_from_json : std::false_type {}; + +template +struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> +{ + using serializer = typename BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + +// This trait checks if BasicJsonType::json_serializer::to_json exists +// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion. +template +struct has_to_json : std::false_type {}; + +template +struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> +{ + using serializer = typename BasicJsonType::template json_serializer; + + static constexpr bool value = + is_detected_exact::value; +}; + + +/////////////////// +// is_ functions // +/////////////////// + +template +struct is_iterator_traits : std::false_type {}; + +template +struct is_iterator_traits> +{ + private: + using traits = iterator_traits; + + public: + static constexpr auto value = + is_detected::value && + is_detected::value && + is_detected::value && + is_detected::value && + is_detected::value; +}; + +// The following implementation of is_complete_type is taken from +// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/ +// and is written by Xiang Fan who agreed to using it in this library. 
+ +template +struct is_complete_type : std::false_type {}; + +template +struct is_complete_type : std::true_type {}; + +template +struct is_compatible_object_type_impl : std::false_type {}; + +template +struct is_compatible_object_type_impl < + BasicJsonType, CompatibleObjectType, + enable_if_t < is_detected::value&& + is_detected::value >> +{ + using object_t = typename BasicJsonType::object_t; + + // macOS's is_constructible does not play well with nonesuch... + static constexpr bool value = + std::is_constructible::value && + std::is_constructible::value; +}; + +template +struct is_compatible_object_type + : is_compatible_object_type_impl {}; + +template +struct is_constructible_object_type_impl : std::false_type {}; + +template +struct is_constructible_object_type_impl < + BasicJsonType, ConstructibleObjectType, + enable_if_t < is_detected::value&& + is_detected::value >> +{ + using object_t = typename BasicJsonType::object_t; + + static constexpr bool value = + (std::is_default_constructible::value && + (std::is_move_assignable::value || + std::is_copy_assignable::value) && + (std::is_constructible::value && + std::is_same < + typename object_t::mapped_type, + typename ConstructibleObjectType::mapped_type >::value)) || + (has_from_json::value || + has_non_default_from_json < + BasicJsonType, + typename ConstructibleObjectType::mapped_type >::value); +}; + +template +struct is_constructible_object_type + : is_constructible_object_type_impl {}; + +template +struct is_compatible_string_type_impl : std::false_type {}; + +template +struct is_compatible_string_type_impl < + BasicJsonType, CompatibleStringType, + enable_if_t::value >> +{ + static constexpr auto value = + std::is_constructible::value; +}; + +template +struct is_compatible_string_type + : is_compatible_string_type_impl {}; + +template +struct is_constructible_string_type_impl : std::false_type {}; + +template +struct is_constructible_string_type_impl < + BasicJsonType, ConstructibleStringType, + 
enable_if_t::value >> +{ + static constexpr auto value = + std::is_constructible::value; +}; + +template +struct is_constructible_string_type + : is_constructible_string_type_impl {}; + +template +struct is_compatible_array_type_impl : std::false_type {}; + +template +struct is_compatible_array_type_impl < + BasicJsonType, CompatibleArrayType, + enable_if_t < is_detected::value&& + is_detected::value&& +// This is needed because json_reverse_iterator has a ::iterator type... +// Therefore it is detected as a CompatibleArrayType. +// The real fix would be to have an Iterable concept. + !is_iterator_traits < + iterator_traits>::value >> +{ + static constexpr bool value = + std::is_constructible::value; +}; + +template +struct is_compatible_array_type + : is_compatible_array_type_impl {}; + +template +struct is_constructible_array_type_impl : std::false_type {}; + +template +struct is_constructible_array_type_impl < + BasicJsonType, ConstructibleArrayType, + enable_if_t::value >> + : std::true_type {}; + +template +struct is_constructible_array_type_impl < + BasicJsonType, ConstructibleArrayType, + enable_if_t < !std::is_same::value&& + std::is_default_constructible::value&& +(std::is_move_assignable::value || + std::is_copy_assignable::value)&& +is_detected::value&& +is_detected::value&& +is_complete_type < +detected_t>::value >> +{ + static constexpr bool value = + // This is needed because json_reverse_iterator has a ::iterator type, + // furthermore, std::back_insert_iterator (and other iterators) have a + // base class `iterator`... Therefore it is detected as a + // ConstructibleArrayType. The real fix would be to have an Iterable + // concept. 
+ !is_iterator_traits>::value && + + (std::is_same::value || + has_from_json::value || + has_non_default_from_json < + BasicJsonType, typename ConstructibleArrayType::value_type >::value); +}; + +template +struct is_constructible_array_type + : is_constructible_array_type_impl {}; + +template +struct is_compatible_integer_type_impl : std::false_type {}; + +template +struct is_compatible_integer_type_impl < + RealIntegerType, CompatibleNumberIntegerType, + enable_if_t < std::is_integral::value&& + std::is_integral::value&& + !std::is_same::value >> +{ + // is there an assert somewhere on overflows? + using RealLimits = std::numeric_limits; + using CompatibleLimits = std::numeric_limits; + + static constexpr auto value = + std::is_constructible::value && + CompatibleLimits::is_integer && + RealLimits::is_signed == CompatibleLimits::is_signed; +}; + +template +struct is_compatible_integer_type + : is_compatible_integer_type_impl {}; + +template +struct is_compatible_type_impl: std::false_type {}; + +template +struct is_compatible_type_impl < + BasicJsonType, CompatibleType, + enable_if_t::value >> +{ + static constexpr bool value = + has_to_json::value; +}; + +template +struct is_compatible_type + : is_compatible_type_impl {}; + +// https://en.cppreference.com/w/cpp/types/conjunction +template struct conjunction : std::true_type { }; +template struct conjunction : B1 { }; +template +struct conjunction +: std::conditional, B1>::type {}; + +template +struct is_constructible_tuple : std::false_type {}; + +template +struct is_constructible_tuple> : conjunction...> {}; +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +void from_json(const BasicJsonType& j, typename std::nullptr_t& n) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_null())) + { + JSON_THROW(type_error::create(302, "type must be null, but is " + std::string(j.type_name()), j)); + } + n = nullptr; +} + +// overloads for basic_json template parameters 
+template < typename BasicJsonType, typename ArithmeticType, + enable_if_t < std::is_arithmetic::value&& + !std::is_same::value, + int > = 0 > +void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) +{ + switch (static_cast(j)) + { + case value_t::number_unsigned: + { + val = static_cast(*j.template get_ptr()); + break; + } + case value_t::number_integer: + { + val = static_cast(*j.template get_ptr()); + break; + } + case value_t::number_float: + { + val = static_cast(*j.template get_ptr()); + break; + } + + default: + JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()), j)); + } +} + +template +void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_boolean())) + { + JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name()), j)); + } + b = *j.template get_ptr(); +} + +template +void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_string())) + { + JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name()), j)); + } + s = *j.template get_ptr(); +} + +template < + typename BasicJsonType, typename ConstructibleStringType, + enable_if_t < + is_constructible_string_type::value&& + !std::is_same::value, + int > = 0 > +void from_json(const BasicJsonType& j, ConstructibleStringType& s) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_string())) + { + JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name()), j)); + } + + s = *j.template get_ptr(); +} + +template +void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val) +{ + get_arithmetic_value(j, val); +} + +template +void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val) +{ + get_arithmetic_value(j, val); +} + +template +void from_json(const BasicJsonType& j, typename 
BasicJsonType::number_integer_t& val) +{ + get_arithmetic_value(j, val); +} + +template::value, int> = 0> +void from_json(const BasicJsonType& j, EnumType& e) +{ + typename std::underlying_type::type val; + get_arithmetic_value(j, val); + e = static_cast(val); +} + +// forward_list doesn't have an insert method +template::value, int> = 0> +void from_json(const BasicJsonType& j, std::forward_list& l) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); + } + l.clear(); + std::transform(j.rbegin(), j.rend(), + std::front_inserter(l), [](const BasicJsonType & i) + { + return i.template get(); + }); +} + +// valarray doesn't have an insert method +template::value, int> = 0> +void from_json(const BasicJsonType& j, std::valarray& l) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); + } + l.resize(j.size()); + std::transform(j.begin(), j.end(), std::begin(l), + [](const BasicJsonType & elem) + { + return elem.template get(); + }); +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N]) +-> decltype(j.template get(), void()) +{ + for (std::size_t i = 0; i < N; ++i) + { + arr[i] = j.at(i).template get(); + } +} + +template +void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/) +{ + arr = *j.template get_ptr(); +} + +template +auto from_json_array_impl(const BasicJsonType& j, std::array& arr, + priority_tag<2> /*unused*/) +-> decltype(j.template get(), void()) +{ + for (std::size_t i = 0; i < N; ++i) + { + arr[i] = j.at(i).template get(); + } +} + +template +auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/) +-> decltype( + arr.reserve(std::declval()), + j.template get(), + void()) +{ + using std::end; + + ConstructibleArrayType ret; + 
ret.reserve(j.size()); + std::transform(j.begin(), j.end(), + std::inserter(ret, end(ret)), [](const BasicJsonType & i) + { + // get() returns *this, this won't call a from_json + // method when value_type is BasicJsonType + return i.template get(); + }); + arr = std::move(ret); +} + +template +void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, + priority_tag<0> /*unused*/) +{ + using std::end; + + ConstructibleArrayType ret; + std::transform( + j.begin(), j.end(), std::inserter(ret, end(ret)), + [](const BasicJsonType & i) + { + // get() returns *this, this won't call a from_json + // method when value_type is BasicJsonType + return i.template get(); + }); + arr = std::move(ret); +} + +template < typename BasicJsonType, typename ConstructibleArrayType, + enable_if_t < + is_constructible_array_type::value&& + !is_constructible_object_type::value&& + !is_constructible_string_type::value&& + !std::is_same::value&& + !is_basic_json::value, + int > = 0 > +auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr) +-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}), +j.template get(), +void()) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); + } + + from_json_array_impl(j, arr, priority_tag<3> {}); +} + +template +void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_binary())) + { + JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(j.type_name()), j)); + } + + bin = *j.template get_ptr(); +} + +template::value, int> = 0> +void from_json(const BasicJsonType& j, ConstructibleObjectType& obj) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_object())) + { + JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name()), j)); + } + + ConstructibleObjectType ret; + auto inner_object = j.template get_ptr(); + using 
value_type = typename ConstructibleObjectType::value_type; + std::transform( + inner_object->begin(), inner_object->end(), + std::inserter(ret, ret.begin()), + [](typename BasicJsonType::object_t::value_type const & p) + { + return value_type(p.first, p.second.template get()); + }); + obj = std::move(ret); +} + +// overload for arithmetic types, not chosen for basic_json template arguments +// (BooleanType, etc..); note: Is it really necessary to provide explicit +// overloads for boolean_t etc. in case of a custom BooleanType which is not +// an arithmetic type? +template < typename BasicJsonType, typename ArithmeticType, + enable_if_t < + std::is_arithmetic::value&& + !std::is_same::value&& + !std::is_same::value&& + !std::is_same::value&& + !std::is_same::value, + int > = 0 > +void from_json(const BasicJsonType& j, ArithmeticType& val) +{ + switch (static_cast(j)) + { + case value_t::number_unsigned: + { + val = static_cast(*j.template get_ptr()); + break; + } + case value_t::number_integer: + { + val = static_cast(*j.template get_ptr()); + break; + } + case value_t::number_float: + { + val = static_cast(*j.template get_ptr()); + break; + } + case value_t::boolean: + { + val = static_cast(*j.template get_ptr()); + break; + } + + default: + JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()), j)); + } +} + +template +void from_json(const BasicJsonType& j, std::pair& p) +{ + p = {j.at(0).template get(), j.at(1).template get()}; +} + +template +void from_json_tuple_impl(const BasicJsonType& j, Tuple& t, index_sequence /*unused*/) +{ + t = std::make_tuple(j.at(Idx).template get::type>()...); +} + +template +void from_json(const BasicJsonType& j, std::tuple& t) +{ + from_json_tuple_impl(j, t, index_sequence_for {}); +} + +template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator, + typename = enable_if_t < !std::is_constructible < + typename BasicJsonType::string_t, Key >::value >> 
+void from_json(const BasicJsonType& j, std::map& m) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); + } + m.clear(); + for (const auto& p : j) + { + if (JSON_HEDLEY_UNLIKELY(!p.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name()), j)); + } + m.emplace(p.at(0).template get(), p.at(1).template get()); + } +} + +template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator, + typename = enable_if_t < !std::is_constructible < + typename BasicJsonType::string_t, Key >::value >> +void from_json(const BasicJsonType& j, std::unordered_map& m) +{ + if (JSON_HEDLEY_UNLIKELY(!j.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); + } + m.clear(); + for (const auto& p : j) + { + if (JSON_HEDLEY_UNLIKELY(!p.is_array())) + { + JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name()), j)); + } + m.emplace(p.at(0).template get(), p.at(1).template get()); + } +} + +struct from_json_fn +{ + template + auto operator()(const BasicJsonType& j, T& val) const + noexcept(noexcept(from_json(j, val))) + -> decltype(from_json(j, val), void()) + { + return from_json(j, val); + } +}; +} // namespace detail + +/// namespace to hold default `from_json` function +/// to see why this is required: +/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html +namespace +{ +constexpr const auto& from_json = detail::static_const::value; +} // namespace +} // namespace nlohmann + +// #include + + +#include // copy +#include // begin, end +#include // string +#include // tuple, get +#include // is_same, is_constructible, is_floating_point, is_enum, underlying_type +#include // move, forward, declval, pair +#include // valarray +#include // vector + +// #include + + +#include 
// size_t +#include // input_iterator_tag +#include // string, to_string +#include // tuple_size, get, tuple_element + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +void int_to_string( string_type& target, std::size_t value ) +{ + // For ADL + using std::to_string; + target = to_string(value); +} +template class iteration_proxy_value +{ + public: + using difference_type = std::ptrdiff_t; + using value_type = iteration_proxy_value; + using pointer = value_type * ; + using reference = value_type & ; + using iterator_category = std::input_iterator_tag; + using string_type = typename std::remove_cv< typename std::remove_reference().key() ) >::type >::type; + + private: + /// the iterator + IteratorType anchor; + /// an index for arrays (used to create key names) + std::size_t array_index = 0; + /// last stringified array index + mutable std::size_t array_index_last = 0; + /// a string representation of the array index + mutable string_type array_index_str = "0"; + /// an empty string (to return a reference for primitive values) + const string_type empty_str{}; + + public: + explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} + + /// dereference operator (needed for range-based for) + iteration_proxy_value& operator*() + { + return *this; + } + + /// increment operator (needed for range-based for) + iteration_proxy_value& operator++() + { + ++anchor; + ++array_index; + + return *this; + } + + /// equality operator (needed for InputIterator) + bool operator==(const iteration_proxy_value& o) const + { + return anchor == o.anchor; + } + + /// inequality operator (needed for range-based for) + bool operator!=(const iteration_proxy_value& o) const + { + return anchor != o.anchor; + } + + /// return key of the iterator + const string_type& key() const + { + JSON_ASSERT(anchor.m_object != nullptr); + + switch (anchor.m_object->type()) + { + // use integer array index as key + case value_t::array: + { + if 
(array_index != array_index_last) + { + int_to_string( array_index_str, array_index ); + array_index_last = array_index; + } + return array_index_str; + } + + // use key from the object + case value_t::object: + return anchor.key(); + + // use an empty key for all primitive types + default: + return empty_str; + } + } + + /// return value of the iterator + typename IteratorType::reference value() const + { + return anchor.value(); + } +}; + +/// proxy class for the items() function +template class iteration_proxy +{ + private: + /// the container to iterate + typename IteratorType::reference container; + + public: + /// construct iteration proxy from a container + explicit iteration_proxy(typename IteratorType::reference cont) noexcept + : container(cont) {} + + /// return iterator begin (needed for range-based for) + iteration_proxy_value begin() noexcept + { + return iteration_proxy_value(container.begin()); + } + + /// return iterator end (needed for range-based for) + iteration_proxy_value end() noexcept + { + return iteration_proxy_value(container.end()); + } +}; +// Structured Bindings Support +// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// And see https://github.com/nlohmann/json/pull/1391 +template = 0> +auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.key()) +{ + return i.key(); +} +// Structured Bindings Support +// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// And see https://github.com/nlohmann/json/pull/1391 +template = 0> +auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.value()) +{ + return i.value(); +} +} // namespace detail +} // namespace nlohmann + +// The Addition to the STD Namespace is required to add +// Structured Bindings Support to the iteration_proxy_value class +// For further reference see https://blog.tartanllama.xyz/structured-bindings/ +// And see https://github.com/nlohmann/json/pull/1391 +namespace std +{ +#if 
defined(__clang__) + // Fix: https://github.com/nlohmann/json/issues/1401 + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wmismatched-tags" +#endif +template +class tuple_size<::nlohmann::detail::iteration_proxy_value> + : public std::integral_constant {}; + +template +class tuple_element> +{ + public: + using type = decltype( + get(std::declval < + ::nlohmann::detail::iteration_proxy_value> ())); +}; +#if defined(__clang__) + #pragma clang diagnostic pop +#endif +} // namespace std + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +////////////////// +// constructors // +////////////////// + +template struct external_constructor; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept + { + j.m_type = value_t::boolean; + j.m_value = b; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s) + { + j.m_type = value_t::string; + j.m_value = s; + j.assert_invariant(); + } + + template + static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s) + { + j.m_type = value_t::string; + j.m_value = std::move(s); + j.assert_invariant(); + } + + template < typename BasicJsonType, typename CompatibleStringType, + enable_if_t < !std::is_same::value, + int > = 0 > + static void construct(BasicJsonType& j, const CompatibleStringType& str) + { + j.m_type = value_t::string; + j.m_value.string = j.template create(str); + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b) + { + j.m_type = value_t::binary; + typename BasicJsonType::binary_t value{b}; + j.m_value = value; + j.assert_invariant(); + } + + template + static void construct(BasicJsonType& j, typename 
BasicJsonType::binary_t&& b) + { + j.m_type = value_t::binary; + typename BasicJsonType::binary_t value{std::move(b)}; + j.m_value = value; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept + { + j.m_type = value_t::number_float; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept + { + j.m_type = value_t::number_unsigned; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept + { + j.m_type = value_t::number_integer; + j.m_value = val; + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr) + { + j.m_type = value_t::array; + j.m_value = arr; + j.set_parents(); + j.assert_invariant(); + } + + template + static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr) + { + j.m_type = value_t::array; + j.m_value = std::move(arr); + j.set_parents(); + j.assert_invariant(); + } + + template < typename BasicJsonType, typename CompatibleArrayType, + enable_if_t < !std::is_same::value, + int > = 0 > + static void construct(BasicJsonType& j, const CompatibleArrayType& arr) + { + using std::begin; + using std::end; + j.m_type = value_t::array; + j.m_value.array = j.template create(begin(arr), end(arr)); + j.set_parents(); + j.assert_invariant(); + } + + template + static void construct(BasicJsonType& j, const std::vector& arr) + { + j.m_type = value_t::array; + j.m_value = value_t::array; + j.m_value.array->reserve(arr.size()); + for (const bool x : arr) + { + j.m_value.array->push_back(x); + 
j.set_parent(j.m_value.array->back()); + } + j.assert_invariant(); + } + + template::value, int> = 0> + static void construct(BasicJsonType& j, const std::valarray& arr) + { + j.m_type = value_t::array; + j.m_value = value_t::array; + j.m_value.array->resize(arr.size()); + if (arr.size() > 0) + { + std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin()); + } + j.set_parents(); + j.assert_invariant(); + } +}; + +template<> +struct external_constructor +{ + template + static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj) + { + j.m_type = value_t::object; + j.m_value = obj; + j.set_parents(); + j.assert_invariant(); + } + + template + static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj) + { + j.m_type = value_t::object; + j.m_value = std::move(obj); + j.set_parents(); + j.assert_invariant(); + } + + template < typename BasicJsonType, typename CompatibleObjectType, + enable_if_t < !std::is_same::value, int > = 0 > + static void construct(BasicJsonType& j, const CompatibleObjectType& obj) + { + using std::begin; + using std::end; + + j.m_type = value_t::object; + j.m_value.object = j.template create(begin(obj), end(obj)); + j.set_parents(); + j.assert_invariant(); + } +}; + +///////////// +// to_json // +///////////// + +template::value, int> = 0> +void to_json(BasicJsonType& j, T b) noexcept +{ + external_constructor::construct(j, b); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, const CompatibleString& s) +{ + external_constructor::construct(j, s); +} + +template +void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s) +{ + external_constructor::construct(j, std::move(s)); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, FloatType val) noexcept +{ + external_constructor::construct(j, static_cast(val)); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept +{ + external_constructor::construct(j, 
static_cast(val)); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept +{ + external_constructor::construct(j, static_cast(val)); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, EnumType e) noexcept +{ + using underlying_type = typename std::underlying_type::type; + external_constructor::construct(j, static_cast(e)); +} + +template +void to_json(BasicJsonType& j, const std::vector& e) +{ + external_constructor::construct(j, e); +} + +template < typename BasicJsonType, typename CompatibleArrayType, + enable_if_t < is_compatible_array_type::value&& + !is_compatible_object_type::value&& + !is_compatible_string_type::value&& + !std::is_same::value&& + !is_basic_json::value, + int > = 0 > +void to_json(BasicJsonType& j, const CompatibleArrayType& arr) +{ + external_constructor::construct(j, arr); +} + +template +void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin) +{ + external_constructor::construct(j, bin); +} + +template::value, int> = 0> +void to_json(BasicJsonType& j, const std::valarray& arr) +{ + external_constructor::construct(j, std::move(arr)); +} + +template +void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr) +{ + external_constructor::construct(j, std::move(arr)); +} + +template < typename BasicJsonType, typename CompatibleObjectType, + enable_if_t < is_compatible_object_type::value&& !is_basic_json::value, int > = 0 > +void to_json(BasicJsonType& j, const CompatibleObjectType& obj) +{ + external_constructor::construct(j, obj); +} + +template +void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) +{ + external_constructor::construct(j, std::move(obj)); +} + +template < + typename BasicJsonType, typename T, std::size_t N, + enable_if_t < !std::is_constructible::value, + int > = 0 > +void to_json(BasicJsonType& j, const T(&arr)[N]) +{ + external_constructor::construct(j, arr); +} + +template < typename BasicJsonType, typename T1, 
typename T2, enable_if_t < std::is_constructible::value&& std::is_constructible::value, int > = 0 > +void to_json(BasicJsonType& j, const std::pair& p) +{ + j = { p.first, p.second }; +} + +// for https://github.com/nlohmann/json/pull/1134 +template>::value, int> = 0> +void to_json(BasicJsonType& j, const T& b) +{ + j = { {b.key(), b.value()} }; +} + +template +void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence /*unused*/) +{ + j = { std::get(t)... }; +} + +template::value, int > = 0> +void to_json(BasicJsonType& j, const T& t) +{ + to_json_tuple_impl(j, t, make_index_sequence::value> {}); +} + +struct to_json_fn +{ + template + auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward(val)))) + -> decltype(to_json(j, std::forward(val)), void()) + { + return to_json(j, std::forward(val)); + } +}; +} // namespace detail + +/// namespace to hold default `to_json` function +namespace +{ +constexpr const auto& to_json = detail::static_const::value; +} // namespace +} // namespace nlohmann + + +namespace nlohmann +{ + +template +struct adl_serializer +{ + /*! + @brief convert a JSON value to any value type + + This function is usually called by the `get()` function of the + @ref basic_json class (either explicit or via conversion operators). + + @param[in] j JSON value to read from + @param[in,out] val value to write to + */ + template + static auto from_json(BasicJsonType&& j, ValueType& val) noexcept( + noexcept(::nlohmann::from_json(std::forward(j), val))) + -> decltype(::nlohmann::from_json(std::forward(j), val), void()) + { + ::nlohmann::from_json(std::forward(j), val); + } + + /*! + @brief convert any value type to a JSON value + + This function is usually called by the constructors of the @ref basic_json + class. 
+ + @param[in,out] j JSON value to write to + @param[in] val value to read from + */ + template + static auto to_json(BasicJsonType& j, ValueType&& val) noexcept( + noexcept(::nlohmann::to_json(j, std::forward(val)))) + -> decltype(::nlohmann::to_json(j, std::forward(val)), void()) + { + ::nlohmann::to_json(j, std::forward(val)); + } +}; + +} // namespace nlohmann + +// #include + + +#include // uint8_t +#include // tie +#include // move + +namespace nlohmann +{ + +/*! +@brief an internal type for a backed binary type + +This type extends the template parameter @a BinaryType provided to `basic_json` +with a subtype used by BSON and MessagePack. This type exists so that the user +does not have to specify a type themselves with a specific naming scheme in +order to override the binary type. + +@tparam BinaryType container to store bytes (`std::vector` by + default) + +@since version 3.8.0 +*/ +template +class byte_container_with_subtype : public BinaryType +{ + public: + /// the type of the underlying container + using container_type = BinaryType; + + byte_container_with_subtype() noexcept(noexcept(container_type())) + : container_type() + {} + + byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b))) + : container_type(b) + {} + + byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b)))) + : container_type(std::move(b)) + {} + + byte_container_with_subtype(const container_type& b, std::uint8_t subtype_) noexcept(noexcept(container_type(b))) + : container_type(b) + , m_subtype(subtype_) + , m_has_subtype(true) + {} + + byte_container_with_subtype(container_type&& b, std::uint8_t subtype_) noexcept(noexcept(container_type(std::move(b)))) + : container_type(std::move(b)) + , m_subtype(subtype_) + , m_has_subtype(true) + {} + + bool operator==(const byte_container_with_subtype& rhs) const + { + return std::tie(static_cast(*this), m_subtype, m_has_subtype) == + std::tie(static_cast(rhs), 
rhs.m_subtype, rhs.m_has_subtype); + } + + bool operator!=(const byte_container_with_subtype& rhs) const + { + return !(rhs == *this); + } + + /*! + @brief sets the binary subtype + + Sets the binary subtype of the value, also flags a binary JSON value as + having a subtype, which has implications for serialization. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @sa @ref subtype() -- return the binary subtype + @sa @ref clear_subtype() -- clears the binary subtype + @sa @ref has_subtype() -- returns whether or not the binary value has a + subtype + + @since version 3.8.0 + */ + void set_subtype(std::uint8_t subtype_) noexcept + { + m_subtype = subtype_; + m_has_subtype = true; + } + + /*! + @brief return the binary subtype + + Returns the numerical subtype of the value if it has a subtype. If it does + not have a subtype, this function will return size_t(-1) as a sentinel + value. + + @return the numerical subtype of the binary value + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @sa @ref set_subtype() -- sets the binary subtype + @sa @ref clear_subtype() -- clears the binary subtype + @sa @ref has_subtype() -- returns whether or not the binary value has a + subtype + + @since version 3.8.0 + */ + constexpr std::uint8_t subtype() const noexcept + { + return m_subtype; + } + + /*! + @brief return whether the value has a subtype + + @return whether the value has a subtype + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @sa @ref subtype() -- return the binary subtype + @sa @ref set_subtype() -- sets the binary subtype + @sa @ref clear_subtype() -- clears the binary subtype + + @since version 3.8.0 + */ + constexpr bool has_subtype() const noexcept + { + return m_has_subtype; + } + + /*! 
+ @brief clears the binary subtype + + Clears the binary subtype and flags the value as not having a subtype, which + has implications for serialization; for instance MessagePack will prefer the + bin family over the ext family. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @sa @ref subtype() -- return the binary subtype + @sa @ref set_subtype() -- sets the binary subtype + @sa @ref has_subtype() -- returns whether or not the binary value has a + subtype + + @since version 3.8.0 + */ + void clear_subtype() noexcept + { + m_subtype = 0; + m_has_subtype = false; + } + + private: + std::uint8_t m_subtype = 0; + bool m_has_subtype = false; +}; + +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + + +#include // size_t, uint8_t +#include // hash + +// #include + + +namespace nlohmann +{ +namespace detail +{ + +// boost::hash_combine +inline std::size_t combine(std::size_t seed, std::size_t h) noexcept +{ + seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U); + return seed; +} + +/*! +@brief hash a JSON value + +The hash function tries to rely on std::hash where possible. Furthermore, the +type of the JSON value is taken into account to have different hash values for +null, 0, 0U, and false, etc. 
+ +@tparam BasicJsonType basic_json specialization +@param j JSON value to hash +@return hash value of j +*/ +template +std::size_t hash(const BasicJsonType& j) +{ + using string_t = typename BasicJsonType::string_t; + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + + const auto type = static_cast(j.type()); + switch (j.type()) + { + case BasicJsonType::value_t::null: + case BasicJsonType::value_t::discarded: + { + return combine(type, 0); + } + + case BasicJsonType::value_t::object: + { + auto seed = combine(type, j.size()); + for (const auto& element : j.items()) + { + const auto h = std::hash {}(element.key()); + seed = combine(seed, h); + seed = combine(seed, hash(element.value())); + } + return seed; + } + + case BasicJsonType::value_t::array: + { + auto seed = combine(type, j.size()); + for (const auto& element : j) + { + seed = combine(seed, hash(element)); + } + return seed; + } + + case BasicJsonType::value_t::string: + { + const auto h = std::hash {}(j.template get_ref()); + return combine(type, h); + } + + case BasicJsonType::value_t::boolean: + { + const auto h = std::hash {}(j.template get()); + return combine(type, h); + } + + case BasicJsonType::value_t::number_integer: + { + const auto h = std::hash {}(j.template get()); + return combine(type, h); + } + + case BasicJsonType::value_t::number_unsigned: + { + const auto h = std::hash {}(j.template get()); + return combine(type, h); + } + + case BasicJsonType::value_t::number_float: + { + const auto h = std::hash {}(j.template get()); + return combine(type, h); + } + + case BasicJsonType::value_t::binary: + { + auto seed = combine(type, j.get_binary().size()); + const auto h = std::hash {}(j.get_binary().has_subtype()); + seed = combine(seed, h); + seed = combine(seed, j.get_binary().subtype()); + for (const auto byte : j.get_binary()) + { + seed = 
combine(seed, std::hash {}(byte)); + } + return seed; + } + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + return 0; // LCOV_EXCL_LINE + } +} + +} // namespace detail +} // namespace nlohmann + +// #include + + +#include // generate_n +#include // array +#include // ldexp +#include // size_t +#include // uint8_t, uint16_t, uint32_t, uint64_t +#include // snprintf +#include // memcpy +#include // back_inserter +#include // numeric_limits +#include // char_traits, string +#include // make_pair, move +#include // vector + +// #include + +// #include + + +#include // array +#include // size_t +#include //FILE * +#include // strlen +#include // istream +#include // begin, end, iterator_traits, random_access_iterator_tag, distance, next +#include // shared_ptr, make_shared, addressof +#include // accumulate +#include // string, char_traits +#include // enable_if, is_base_of, is_pointer, is_integral, remove_pointer +#include // pair, declval + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +/// the supported input formats +enum class input_format_t { json, cbor, msgpack, ubjson, bson }; + +//////////////////// +// input adapters // +//////////////////// + +/*! +Input adapter for stdio file access. This adapter read only 1 byte and do not use any + buffer. This adapter is a very low level adapter. +*/ +class file_input_adapter +{ + public: + using char_type = char; + + JSON_HEDLEY_NON_NULL(2) + explicit file_input_adapter(std::FILE* f) noexcept + : m_file(f) + {} + + // make class move-only + file_input_adapter(const file_input_adapter&) = delete; + file_input_adapter(file_input_adapter&&) = default; + file_input_adapter& operator=(const file_input_adapter&) = delete; + file_input_adapter& operator=(file_input_adapter&&) = delete; + + std::char_traits::int_type get_character() noexcept + { + return std::fgetc(m_file); + } + + private: + /// the file pointer to read from + std::FILE* m_file; +}; + + +/*! 
+Input adapter for a (caching) istream. Ignores a UFT Byte Order Mark at +beginning of input. Does not support changing the underlying std::streambuf +in mid-input. Maintains underlying std::istream and std::streambuf to support +subsequent use of standard std::istream operations to process any input +characters following those used in parsing the JSON input. Clears the +std::istream flags; any input errors (e.g., EOF) will be detected by the first +subsequent call for input from the std::istream. +*/ +class input_stream_adapter +{ + public: + using char_type = char; + + ~input_stream_adapter() + { + // clear stream flags; we use underlying streambuf I/O, do not + // maintain ifstream flags, except eof + if (is != nullptr) + { + is->clear(is->rdstate() & std::ios::eofbit); + } + } + + explicit input_stream_adapter(std::istream& i) + : is(&i), sb(i.rdbuf()) + {} + + // delete because of pointer members + input_stream_adapter(const input_stream_adapter&) = delete; + input_stream_adapter& operator=(input_stream_adapter&) = delete; + input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete; + + input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb) + { + rhs.is = nullptr; + rhs.sb = nullptr; + } + + // std::istream/std::streambuf use std::char_traits::to_int_type, to + // ensure that std::char_traits::eof() and the character 0xFF do not + // end up as the same value, eg. 0xFFFFFFFF. + std::char_traits::int_type get_character() + { + auto res = sb->sbumpc(); + // set eof manually, as we don't use the istream interface. + if (JSON_HEDLEY_UNLIKELY(res == EOF)) + { + is->clear(is->rdstate() | std::ios::eofbit); + } + return res; + } + + private: + /// the associated input stream + std::istream* is = nullptr; + std::streambuf* sb = nullptr; +}; + +// General-purpose iterator-based adapter. It might not be as fast as +// theoretically possible for some containers, but it is extremely versatile. 
+template +class iterator_input_adapter +{ + public: + using char_type = typename std::iterator_traits::value_type; + + iterator_input_adapter(IteratorType first, IteratorType last) + : current(std::move(first)), end(std::move(last)) {} + + typename std::char_traits::int_type get_character() + { + if (JSON_HEDLEY_LIKELY(current != end)) + { + auto result = std::char_traits::to_int_type(*current); + std::advance(current, 1); + return result; + } + + return std::char_traits::eof(); + } + + private: + IteratorType current; + IteratorType end; + + template + friend struct wide_string_input_helper; + + bool empty() const + { + return current == end; + } + +}; + + +template +struct wide_string_input_helper; + +template +struct wide_string_input_helper +{ + // UTF-32 + static void fill_buffer(BaseInputAdapter& input, + std::array::int_type, 4>& utf8_bytes, + size_t& utf8_bytes_index, + size_t& utf8_bytes_filled) + { + utf8_bytes_index = 0; + + if (JSON_HEDLEY_UNLIKELY(input.empty())) + { + utf8_bytes[0] = std::char_traits::eof(); + utf8_bytes_filled = 1; + } + else + { + // get the current character + const auto wc = input.get_character(); + + // UTF-32 to UTF-8 encoding + if (wc < 0x80) + { + utf8_bytes[0] = static_cast::int_type>(wc); + utf8_bytes_filled = 1; + } + else if (wc <= 0x7FF) + { + utf8_bytes[0] = static_cast::int_type>(0xC0u | ((static_cast(wc) >> 6u) & 0x1Fu)); + utf8_bytes[1] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); + utf8_bytes_filled = 2; + } + else if (wc <= 0xFFFF) + { + utf8_bytes[0] = static_cast::int_type>(0xE0u | ((static_cast(wc) >> 12u) & 0x0Fu)); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); + utf8_bytes_filled = 3; + } + else if (wc <= 0x10FFFF) + { + utf8_bytes[0] = static_cast::int_type>(0xF0u | ((static_cast(wc) >> 18u) & 0x07u)); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 12u) & 
0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); + utf8_bytes[3] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); + utf8_bytes_filled = 4; + } + else + { + // unknown character + utf8_bytes[0] = static_cast::int_type>(wc); + utf8_bytes_filled = 1; + } + } + } +}; + +template +struct wide_string_input_helper +{ + // UTF-16 + static void fill_buffer(BaseInputAdapter& input, + std::array::int_type, 4>& utf8_bytes, + size_t& utf8_bytes_index, + size_t& utf8_bytes_filled) + { + utf8_bytes_index = 0; + + if (JSON_HEDLEY_UNLIKELY(input.empty())) + { + utf8_bytes[0] = std::char_traits::eof(); + utf8_bytes_filled = 1; + } + else + { + // get the current character + const auto wc = input.get_character(); + + // UTF-16 to UTF-8 encoding + if (wc < 0x80) + { + utf8_bytes[0] = static_cast::int_type>(wc); + utf8_bytes_filled = 1; + } + else if (wc <= 0x7FF) + { + utf8_bytes[0] = static_cast::int_type>(0xC0u | ((static_cast(wc) >> 6u))); + utf8_bytes[1] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); + utf8_bytes_filled = 2; + } + else if (0xD800 > wc || wc >= 0xE000) + { + utf8_bytes[0] = static_cast::int_type>(0xE0u | ((static_cast(wc) >> 12u))); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); + utf8_bytes_filled = 3; + } + else + { + if (JSON_HEDLEY_UNLIKELY(!input.empty())) + { + const auto wc2 = static_cast(input.get_character()); + const auto charcode = 0x10000u + (((static_cast(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu)); + utf8_bytes[0] = static_cast::int_type>(0xF0u | (charcode >> 18u)); + utf8_bytes[1] = static_cast::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu)); + utf8_bytes[2] = static_cast::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu)); + utf8_bytes[3] = static_cast::int_type>(0x80u | (charcode & 0x3Fu)); + utf8_bytes_filled = 4; + } + else + { + utf8_bytes[0] = static_cast::int_type>(wc); 
+ utf8_bytes_filled = 1; + } + } + } + } +}; + +// Wraps another input apdater to convert wide character types into individual bytes. +template +class wide_string_input_adapter +{ + public: + using char_type = char; + + wide_string_input_adapter(BaseInputAdapter base) + : base_adapter(base) {} + + typename std::char_traits::int_type get_character() noexcept + { + // check if buffer needs to be filled + if (utf8_bytes_index == utf8_bytes_filled) + { + fill_buffer(); + + JSON_ASSERT(utf8_bytes_filled > 0); + JSON_ASSERT(utf8_bytes_index == 0); + } + + // use buffer + JSON_ASSERT(utf8_bytes_filled > 0); + JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled); + return utf8_bytes[utf8_bytes_index++]; + } + + private: + BaseInputAdapter base_adapter; + + template + void fill_buffer() + { + wide_string_input_helper::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled); + } + + /// a buffer for UTF-8 bytes + std::array::int_type, 4> utf8_bytes = {{0, 0, 0, 0}}; + + /// index to the utf8_codes array for the next valid byte + std::size_t utf8_bytes_index = 0; + /// number of valid bytes in the utf8_codes array + std::size_t utf8_bytes_filled = 0; +}; + + +template +struct iterator_input_adapter_factory +{ + using iterator_type = IteratorType; + using char_type = typename std::iterator_traits::value_type; + using adapter_type = iterator_input_adapter; + + static adapter_type create(IteratorType first, IteratorType last) + { + return adapter_type(std::move(first), std::move(last)); + } +}; + +template +struct is_iterator_of_multibyte +{ + using value_type = typename std::iterator_traits::value_type; + enum + { + value = sizeof(value_type) > 1 + }; +}; + +template +struct iterator_input_adapter_factory::value>> +{ + using iterator_type = IteratorType; + using char_type = typename std::iterator_traits::value_type; + using base_adapter_type = iterator_input_adapter; + using adapter_type = wide_string_input_adapter; + + static adapter_type create(IteratorType 
first, IteratorType last) + { + return adapter_type(base_adapter_type(std::move(first), std::move(last))); + } +}; + +// General purpose iterator-based input +template +typename iterator_input_adapter_factory::adapter_type input_adapter(IteratorType first, IteratorType last) +{ + using factory_type = iterator_input_adapter_factory; + return factory_type::create(first, last); +} + +// Convenience shorthand from container to iterator +// Enables ADL on begin(container) and end(container) +// Encloses the using declarations in namespace for not to leak them to outside scope + +namespace container_input_adapter_factory_impl +{ + +using std::begin; +using std::end; + +template +struct container_input_adapter_factory {}; + +template +struct container_input_adapter_factory< ContainerType, + void_t()), end(std::declval()))>> + { + using adapter_type = decltype(input_adapter(begin(std::declval()), end(std::declval()))); + + static adapter_type create(const ContainerType& container) +{ + return input_adapter(begin(container), end(container)); +} + }; + +} + +template +typename container_input_adapter_factory_impl::container_input_adapter_factory::adapter_type input_adapter(const ContainerType& container) +{ + return container_input_adapter_factory_impl::container_input_adapter_factory::create(container); +} + +// Special cases with fast paths +inline file_input_adapter input_adapter(std::FILE* file) +{ + return file_input_adapter(file); +} + +inline input_stream_adapter input_adapter(std::istream& stream) +{ + return input_stream_adapter(stream); +} + +inline input_stream_adapter input_adapter(std::istream&& stream) +{ + return input_stream_adapter(stream); +} + +using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval(), std::declval())); + +// Null-delimited strings, and the like. 
+template < typename CharT, + typename std::enable_if < + std::is_pointer::value&& + !std::is_array::value&& + std::is_integral::type>::value&& + sizeof(typename std::remove_pointer::type) == 1, + int >::type = 0 > +contiguous_bytes_input_adapter input_adapter(CharT b) +{ + auto length = std::strlen(reinterpret_cast(b)); + const auto* ptr = reinterpret_cast(b); + return input_adapter(ptr, ptr + length); +} + +template +auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) +{ + return input_adapter(array, array + N); +} + +// This class only handles inputs of input_buffer_adapter type. +// It's required so that expressions like {ptr, len} can be implicitely casted +// to the correct adapter. +class span_input_adapter +{ + public: + template < typename CharT, + typename std::enable_if < + std::is_pointer::value&& + std::is_integral::type>::value&& + sizeof(typename std::remove_pointer::type) == 1, + int >::type = 0 > + span_input_adapter(CharT b, std::size_t l) + : ia(reinterpret_cast(b), reinterpret_cast(b) + l) {} + + template::iterator_category, std::random_access_iterator_tag>::value, + int>::type = 0> + span_input_adapter(IteratorType first, IteratorType last) + : ia(input_adapter(first, last)) {} + + contiguous_bytes_input_adapter&& get() + { + return std::move(ia); + } + + private: + contiguous_bytes_input_adapter ia; +}; +} // namespace detail +} // namespace nlohmann + +// #include + + +#include +#include // string +#include // move +#include // vector + +// #include + +// #include + + +namespace nlohmann +{ + +/*! +@brief SAX interface + +This class describes the SAX interface used by @ref nlohmann::json::sax_parse. +Each function is called in different situations while the input is parsed. The +boolean return value informs the parser whether to continue processing the +input. 
+*/ +template +struct json_sax +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + /*! + @brief a null value was read + @return whether parsing should proceed + */ + virtual bool null() = 0; + + /*! + @brief a boolean value was read + @param[in] val boolean value + @return whether parsing should proceed + */ + virtual bool boolean(bool val) = 0; + + /*! + @brief an integer number was read + @param[in] val integer value + @return whether parsing should proceed + */ + virtual bool number_integer(number_integer_t val) = 0; + + /*! + @brief an unsigned integer number was read + @param[in] val unsigned integer value + @return whether parsing should proceed + */ + virtual bool number_unsigned(number_unsigned_t val) = 0; + + /*! + @brief an floating-point number was read + @param[in] val floating-point value + @param[in] s raw token value + @return whether parsing should proceed + */ + virtual bool number_float(number_float_t val, const string_t& s) = 0; + + /*! + @brief a string was read + @param[in] val string value + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool string(string_t& val) = 0; + + /*! + @brief a binary string was read + @param[in] val binary value + @return whether parsing should proceed + @note It is safe to move the passed binary. + */ + virtual bool binary(binary_t& val) = 0; + + /*! + @brief the beginning of an object was read + @param[in] elements number of object elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_object(std::size_t elements) = 0; + + /*! 
+ @brief an object key was read + @param[in] val object key + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool key(string_t& val) = 0; + + /*! + @brief the end of an object was read + @return whether parsing should proceed + */ + virtual bool end_object() = 0; + + /*! + @brief the beginning of an array was read + @param[in] elements number of array elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_array(std::size_t elements) = 0; + + /*! + @brief the end of an array was read + @return whether parsing should proceed + */ + virtual bool end_array() = 0; + + /*! + @brief a parse error occurred + @param[in] position the position in the input where the error occurs + @param[in] last_token the last read token + @param[in] ex an exception object describing the error + @return whether parsing should proceed (must return false) + */ + virtual bool parse_error(std::size_t position, + const std::string& last_token, + const detail::exception& ex) = 0; + + virtual ~json_sax() = default; +}; + + +namespace detail +{ +/*! +@brief SAX implementation to create a JSON value from SAX events + +This class implements the @ref json_sax interface and processes the SAX events +to create a JSON value which makes it basically a DOM parser. The structure or +hierarchy of the JSON value is managed by the stack `ref_stack` which contains +a pointer to the respective array or object for each recursion depth. + +After successful parsing, the value that is passed by reference to the +constructor contains the parsed value. 
+ +@tparam BasicJsonType the JSON type +*/ +template +class json_sax_dom_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + /*! + @param[in, out] r reference to a JSON value that is manipulated while + parsing + @param[in] allow_exceptions_ whether parse errors yield exceptions + */ + explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) + : root(r), allow_exceptions(allow_exceptions_) + {} + + // make class move-only + json_sax_dom_parser(const json_sax_dom_parser&) = delete; + json_sax_dom_parser(json_sax_dom_parser&&) = default; + json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; + ~json_sax_dom_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); + + if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len), *ref_stack.back())); + } + + return true; + } + + bool key(string_t& val) + { + // 
add null at given key and store the reference for later + object_element = &(ref_stack.back()->m_value.object->operator[](val)); + return true; + } + + bool end_object() + { + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + bool start_array(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); + + if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len), *ref_stack.back())); + } + + return true; + } + + bool end_array() + { + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + /*! + @invariant If the ref stack is empty, then the passed value will be the new + root. 
+ @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + */ + template + JSON_HEDLEY_RETURNS_NON_NULL + BasicJsonType* handle_value(Value&& v) + { + if (ref_stack.empty()) + { + root = BasicJsonType(std::forward(v)); + return &root; + } + + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_value.array->emplace_back(std::forward(v)); + return &(ref_stack.back()->m_value.array->back()); + } + + JSON_ASSERT(ref_stack.back()->is_object()); + JSON_ASSERT(object_element); + *object_element = BasicJsonType(std::forward(v)); + return object_element; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; +}; + +template +class json_sax_dom_callback_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using parser_callback_t = typename BasicJsonType::parser_callback_t; + using parse_event_t = typename BasicJsonType::parse_event_t; + + json_sax_dom_callback_parser(BasicJsonType& r, + const parser_callback_t cb, + const bool allow_exceptions_ = true) + : root(r), callback(cb), allow_exceptions(allow_exceptions_) + { + keep_stack.push_back(true); + } + + // make class move-only + json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; + 
json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; + ~json_sax_dom_callback_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + // check callback for object start + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); + keep_stack.push_back(keep); + + auto val = handle_value(BasicJsonType::value_t::object, true); + ref_stack.push_back(val.second); + + // check object limit + if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len), *ref_stack.back())); + } + + return true; + } + + bool key(string_t& val) + { + BasicJsonType k = BasicJsonType(val); + + // check callback for key + const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); + key_keep_stack.push_back(keep); + + // add discarded value at given key and store the reference for later + if (keep && ref_stack.back()) + { + object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded); + } + + return true; + } + + bool end_object() + { + if (ref_stack.back()) + { + if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) + { + // 
// NOTE(review): This chunk is nlohmann/json (json_sax.hpp), not the CoreML
// backend described in the commit message — TODO confirm the patch payload.
// All angle-bracket spans ('<...>') appear stripped by extraction: template
// parameter/argument lists are missing (e.g. 'static_cast(...)',
// 'std::vector ref_stack'). Tokens are preserved as found; stripped spans
// are flagged but not reinserted.

// discard object
            *ref_stack.back() = discarded;
        }
        else
        {
            ref_stack.back()->set_parents();
        }
    }

    JSON_ASSERT(!ref_stack.empty());
    JSON_ASSERT(!keep_stack.empty());
    ref_stack.pop_back();
    keep_stack.pop_back();

    if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
    {
        // remove discarded value
        for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
        {
            if (it->is_discarded())
            {
                ref_stack.back()->erase(it);
                break;
            }
        }
    }

    return true;
}

// SAX event: beginning of an array; pushes the (possibly discarded) array
// onto ref_stack and records the callback's keep decision on keep_stack.
bool start_array(std::size_t len)
{
    // NOTE(review): template argument of static_cast stripped in extraction
    const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded);
    keep_stack.push_back(keep);

    auto val = handle_value(BasicJsonType::value_t::array, true);
    ref_stack.push_back(val.second);

    // check array limit
    if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
    {
        JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len), *ref_stack.back()));
    }

    return true;
}

// SAX event: end of an array; gives the callback a chance to discard the
// finished array, then pops it from the parse stacks.
bool end_array()
{
    bool keep = true;

    if (ref_stack.back())
    {
        keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
        if (keep)
        {
            ref_stack.back()->set_parents();
        }
        else
        {
            // discard array
            *ref_stack.back() = discarded;
        }
    }

    JSON_ASSERT(!ref_stack.empty());
    JSON_ASSERT(!keep_stack.empty());
    ref_stack.pop_back();
    keep_stack.pop_back();

    // remove discarded value
    if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
    {
        ref_stack.back()->m_value.array->pop_back();
    }

    return true;
}

// SAX event: a parse error occurred; records the error and rethrows the
// exception when allow_exceptions is set, otherwise returns false to stop
// parsing. NOTE(review): 'template' lost its parameter list ('<class
// Exception>' upstream) and 'static_cast(ex)' its '<void>' argument.
template
bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
                 const Exception& ex)
{
    errored = true;
    static_cast(ex);
    if (allow_exceptions)
    {
        JSON_THROW(ex);
    }
    return false;
}

/// whether a parse error has occurred so far
constexpr bool is_errored() const
{
    return errored;
}

private:
/*!
@param[in] v value to add to the JSON value we build during parsing
@param[in] skip_callback whether we should skip calling the callback
           function; this is required after start_array() and
           start_object() SAX events, because otherwise we would call the
           callback function with an empty array or object, respectively.

@invariant If the ref stack is empty, then the passed value will be the new
           root.
@invariant If the ref stack contains a value, then it is an array or an
           object to which we can add elements

@return pair of boolean (whether value should be kept) and pointer (to the
        passed value in the ref_stack hierarchy; nullptr if not kept)
*/
// NOTE(review): signature garbled by extraction — upstream is
// 'template<typename Value> std::pair<bool, BasicJsonType*> handle_value(...)'.
template
std::pair handle_value(Value&& v, const bool skip_callback = false)
{
    JSON_ASSERT(!keep_stack.empty());

    // do not handle this value if we know it would be added to a discarded
    // container
    if (!keep_stack.back())
    {
        return {false, nullptr};
    }

    // create value
    auto value = BasicJsonType(std::forward(v));

    // check callback
    const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value);

    // do not handle this value if we just learnt it shall be discarded
    if (!keep)
    {
        return {false, nullptr};
    }

    if (ref_stack.empty())
    {
        root = std::move(value);
        return {true, &root};
    }

    // skip this value if we already decided to skip the parent
    // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
    if (!ref_stack.back())
    {
        return {false, nullptr};
    }

    // we now only expect arrays and objects
    JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());

    // array
    if (ref_stack.back()->is_array())
    {
        ref_stack.back()->m_value.array->emplace_back(std::move(value));
        return {true, &(ref_stack.back()->m_value.array->back())};
    }

    // object
    JSON_ASSERT(ref_stack.back()->is_object());
    // check if we should store an element for the current key
    JSON_ASSERT(!key_keep_stack.empty());
    const bool store_element = key_keep_stack.back();
    key_keep_stack.pop_back();

    if (!store_element)
    {
        return {false, nullptr};
    }

    JSON_ASSERT(object_element);
    *object_element = std::move(value);
    return {true, object_element};
}

/// the parsed JSON value
BasicJsonType& root;
/// stack to model hierarchy of values
/// NOTE(review): element types of these vectors stripped in extraction
/// (upstream: std::vector<BasicJsonType*> / std::vector<bool>)
std::vector ref_stack {};
/// stack to manage which values to keep
std::vector keep_stack {};
/// stack to manage which object keys to keep
std::vector key_keep_stack {};
/// helper to hold the reference for the next object element
BasicJsonType* object_element = nullptr;
/// whether a syntax error occurred
bool errored = false;
/// callback function
const parser_callback_t callback = nullptr;
/// whether to throw exceptions in case of errors
const bool allow_exceptions = true;
/// a discarded value for the callback
BasicJsonType discarded = BasicJsonType::value_t::discarded;
};

// SAX handler that accepts every event without building a value; used to
// answer "is this valid JSON?" without constructing a document.
// NOTE(review): 'template' lost its '<typename BasicJsonType>' parameter list.
template
class json_sax_acceptor
{
  public:
    using number_integer_t = typename BasicJsonType::number_integer_t;
    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
    using number_float_t = typename BasicJsonType::number_float_t;
    using string_t = typename BasicJsonType::string_t;
    using binary_t = typename BasicJsonType::binary_t;

    bool null()
    {
        return true;
    }

    bool boolean(bool /*unused*/)
    {
        return true;
    }

    bool number_integer(number_integer_t /*unused*/)
    {
        return true;
    }

    bool number_unsigned(number_unsigned_t /*unused*/)
    {
        return true;
    }

    bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
    {
        return true;
    }

    bool string(string_t& /*unused*/)
    {
        return true;
    }

    bool binary(binary_t& /*unused*/)
    {
        return true;
    }

    bool start_object(std::size_t /*unused*/ = std::size_t(-1))
    {
        return true;
    }

    bool key(string_t& /*unused*/)
    {
        return true;
    }

    bool end_object()
    {
        return true;
    }

    bool start_array(std::size_t /*unused*/ = std::size_t(-1))
    {
        return true;
    }

    bool end_array()
    {
        return true;
    }

    // the acceptor never throws; errors simply report failure
    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
    {
        return false;
    }
};
} // namespace detail

} // namespace nlohmann

// #include 


// NOTE(review): the header names of every #include below were stripped by
// extraction (the trailing comments still name the intended contents, e.g.
// '// array' presumably was '#include <array>') — restore before compiling.
#include  // array
#include  // localeconv
#include  // size_t
#include  // snprintf
#include  // strtof, strtod, strtold, strtoll, strtoull
#include  // initializer_list
#include  // char_traits, string
#include  // move
#include  // vector

// #include 

// #include 

// #include 


namespace nlohmann
{
namespace detail
{
///////////
// lexer //
///////////

// Non-template base of the lexer holding the token vocabulary shared by all
// instantiations. NOTE(review): 'template' lost its parameter list
// ('<typename BasicJsonType>' upstream).
template
class lexer_base
{
  public:
    /// token types for the parser
    enum class token_type
    {
        uninitialized,    ///< indicating the scanner is uninitialized
        literal_true,     ///< the `true` literal
        literal_false,    ///< the `false` literal
        literal_null,     ///< the `null` literal
        value_string,     ///< a string -- use get_string() for actual value
        value_unsigned,   ///< an unsigned integer -- use get_number_unsigned() for actual value
        value_integer,    ///< a signed integer -- use get_number_integer() for actual value
        value_float,      ///< an floating point number -- use get_number_float() for actual value
        begin_array,      ///< the character for array begin `[`
        begin_object,     ///< the character for object begin `{`
        end_array,        ///< the character for array end `]`
        end_object,       ///< the character for object end `}`
        name_separator,   ///< the name separator `:`
        value_separator,  ///< the value separator `,`
        parse_error,      ///< indicating a parse error
        end_of_input,     ///< indicating the end of the input buffer
        literal_or_value  ///< a literal or the begin of a value (only for diagnostics)
    };

    /// return name of values of type token_type (only used for errors)
    JSON_HEDLEY_RETURNS_NON_NULL
    JSON_HEDLEY_CONST
    static const char*
    token_type_name(const token_type t) noexcept
    {
        switch (t)
        {
            case token_type::uninitialized:
                // NOTE(review): empty literal looks like extraction damage —
                // upstream nlohmann/json returns "<uninitialized>" here.
                // Cannot change a runtime string in a comment-only edit.
                return "";
            case token_type::literal_true:
                return "true literal";
            case token_type::literal_false:
                return "false literal";
            case token_type::literal_null:
                return "null literal";
            case token_type::value_string:
                return "string literal";
            case token_type::value_unsigned:
            case token_type::value_integer:
            case token_type::value_float:
                return "number literal";
            case token_type::begin_array:
                return "'['";
            case token_type::begin_object:
                return "'{'";
            case token_type::end_array:
                return "']'";
            case token_type::end_object:
                return "'}'";
            case token_type::name_separator:
                return "':'";
            case token_type::value_separator:
                return "','";
            case token_type::parse_error:
                // NOTE(review): upstream returns "<parse error>" here —
                // literal apparently stripped in extraction.
                return "";
            case token_type::end_of_input:
                return "end of input";
            case token_type::literal_or_value:
                return "'[', '{', or a literal";
            // LCOV_EXCL_START
            default: // catch non-enum values
                return "unknown token";
            // LCOV_EXCL_STOP
        }
    }
};
/*!
@brief lexical analysis

This class organizes the lexical analysis during JSON deserialization.
+*/ +template +class lexer : public lexer_base +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using char_type = typename InputAdapterType::char_type; + using char_int_type = typename std::char_traits::int_type; + + public: + using token_type = typename lexer_base::token_type; + + explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) + : ia(std::move(adapter)) + , ignore_comments(ignore_comments_) + , decimal_point_char(static_cast(get_decimal_point())) + {} + + // delete because of pointer members + lexer(const lexer&) = delete; + lexer(lexer&&) = default; + lexer& operator=(lexer&) = delete; + lexer& operator=(lexer&&) = default; + ~lexer() = default; + + private: + ///////////////////// + // locales + ///////////////////// + + /// return the locale-dependent decimal point + JSON_HEDLEY_PURE + static char get_decimal_point() noexcept + { + const auto* loc = localeconv(); + JSON_ASSERT(loc != nullptr); + return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point); + } + + ///////////////////// + // scan functions + ///////////////////// + + /*! + @brief get codepoint from 4 hex characters following `\u` + + For input "\u c1 c2 c3 c4" the codepoint is: + (c1 * 0x1000) + (c2 * 0x0100) + (c3 * 0x0010) + c4 + = (c1 << 12) + (c2 << 8) + (c3 << 4) + (c4 << 0) + + Furthermore, the possible characters '0'..'9', 'A'..'F', and 'a'..'f' + must be converted to the integers 0x0..0x9, 0xA..0xF, 0xA..0xF, resp. The + conversion is done by subtracting the offset (0x30, 0x37, and 0x57) + between the ASCII value of the character and the desired integer value. + + @return codepoint (0x0000..0xFFFF) or -1 in case of an error (e.g. 
EOF or + non-hex character) + */ + int get_codepoint() + { + // this function only makes sense after reading `\u` + JSON_ASSERT(current == 'u'); + int codepoint = 0; + + const auto factors = { 12u, 8u, 4u, 0u }; + for (const auto factor : factors) + { + get(); + + if (current >= '0' && current <= '9') + { + codepoint += static_cast((static_cast(current) - 0x30u) << factor); + } + else if (current >= 'A' && current <= 'F') + { + codepoint += static_cast((static_cast(current) - 0x37u) << factor); + } + else if (current >= 'a' && current <= 'f') + { + codepoint += static_cast((static_cast(current) - 0x57u) << factor); + } + else + { + return -1; + } + } + + JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF); + return codepoint; + } + + /*! + @brief check if the next byte(s) are inside a given range + + Adds the current byte and, for each passed range, reads a new byte and + checks if it is inside the range. If a violation was detected, set up an + error message and return false. Otherwise, return true. + + @param[in] ranges list of integers; interpreted as list of pairs of + inclusive lower and upper bound, respectively + + @pre The passed list @a ranges must have 2, 4, or 6 elements; that is, + 1, 2, or 3 pairs. This precondition is enforced by an assertion. + + @return true if and only if no range violation was detected + */ + bool next_byte_in_range(std::initializer_list ranges) + { + JSON_ASSERT(ranges.size() == 2 || ranges.size() == 4 || ranges.size() == 6); + add(current); + + for (auto range = ranges.begin(); range != ranges.end(); ++range) + { + get(); + if (JSON_HEDLEY_LIKELY(*range <= current && current <= *(++range))) + { + add(current); + } + else + { + error_message = "invalid string: ill-formed UTF-8 byte"; + return false; + } + } + + return true; + } + + /*! + @brief scan a string literal + + This function scans a string according to Sect. 7 of RFC 7159. While + scanning, bytes are escaped and copied into buffer token_buffer. 
Then the
    function returns successfully, token_buffer is *not* null-terminated (as it
    may contain \0 bytes), and token_buffer.size() is the number of bytes in the
    string.

    @return token_type::value_string if string could be successfully scanned,
            token_type::parse_error otherwise

    @note In case of errors, variable error_message contains a textual
          description.
    */
    token_type scan_string()
    {
        // reset token_buffer (ignore opening quote)
        reset();

        // we entered the function by reading an open quote
        JSON_ASSERT(current == '\"');

        while (true)
        {
            // get next character
            switch (get())
            {
                // end of file while parsing string
                // NOTE(review): '<char_type>' argument of std::char_traits
                // appears stripped here and throughout this chunk.
                case std::char_traits::eof():
                {
                    error_message = "invalid string: missing closing quote";
                    return token_type::parse_error;
                }

                // closing quote
                case '\"':
                {
                    return token_type::value_string;
                }

                // escapes
                case '\\':
                {
                    switch (get())
                    {
                        // quotation mark
                        case '\"':
                            add('\"');
                            break;
                        // reverse solidus
                        case '\\':
                            add('\\');
                            break;
                        // solidus
                        case '/':
                            add('/');
                            break;
                        // backspace
                        case 'b':
                            add('\b');
                            break;
                        // form feed
                        case 'f':
                            add('\f');
                            break;
                        // line feed
                        case 'n':
                            add('\n');
                            break;
                        // carriage return
                        case 'r':
                            add('\r');
                            break;
                        // tab
                        case 't':
                            add('\t');
                            break;

                        // unicode escapes
                        case 'u':
                        {
                            const int codepoint1 = get_codepoint();
                            int codepoint = codepoint1; // start with codepoint1

                            if (JSON_HEDLEY_UNLIKELY(codepoint1 == -1))
                            {
                                error_message = "invalid string: '\\u' must be followed by 4 hex digits";
                                return token_type::parse_error;
                            }

                            // check if code point is a high surrogate
                            if (0xD800 <= codepoint1 && codepoint1 <= 0xDBFF)
                            {
                                // expect next \uxxxx entry
                                if (JSON_HEDLEY_LIKELY(get() == '\\' && get() == 'u'))
                                {
                                    const int codepoint2 = get_codepoint();

                                    if (JSON_HEDLEY_UNLIKELY(codepoint2 == -1))
                                    {
                                        error_message = "invalid string: '\\u' must be followed by 4 hex digits";
                                        return token_type::parse_error;
                                    }

                                    // check if codepoint2 is a low surrogate
                                    if (JSON_HEDLEY_LIKELY(0xDC00 <= codepoint2 && codepoint2 <= 0xDFFF))
                                    {
                                        // overwrite codepoint
                                        codepoint = static_cast(
                                                        // high surrogate occupies the most significant 22 bits
                                                        (static_cast(codepoint1) << 10u)
                                                        // low surrogate occupies the least significant 15 bits
                                                        + static_cast(codepoint2)
                                                        // there is still the 0xD800, 0xDC00 and 0x10000 noise
                                                        // in the result so we have to subtract with:
                                                        // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00
                                                        - 0x35FDC00u);
                                    }
                                    else
                                    {
                                        error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
                                        return token_type::parse_error;
                                    }
                                }
                                else
                                {
                                    error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
                                    return token_type::parse_error;
                                }
                            }
                            else
                            {
                                if (JSON_HEDLEY_UNLIKELY(0xDC00 <= codepoint1 && codepoint1 <= 0xDFFF))
                                {
                                    error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF";
                                    return token_type::parse_error;
                                }
                            }

                            // result of the above calculation yields a proper codepoint
                            JSON_ASSERT(0x00 <= codepoint && codepoint <= 0x10FFFF);

                            // translate codepoint into bytes
                            if (codepoint < 0x80)
                            {
                                // 1-byte characters: 0xxxxxxx (ASCII)
                                add(static_cast(codepoint));
                            }
                            else if (codepoint <= 0x7FF)
                            {
                                // 2-byte characters: 110xxxxx 10xxxxxx
                                add(static_cast(0xC0u | (static_cast(codepoint) >> 6u)));
                                add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu)));
                            }
                            else if (codepoint <= 0xFFFF)
                            {
                                // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
                                add(static_cast(0xE0u | (static_cast(codepoint) >> 12u)));
                                add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu)));
                                add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu)));
                            }
                            else
                            {
                                // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
                                add(static_cast(0xF0u | (static_cast(codepoint) >> 18u)));
                                add(static_cast(0x80u | ((static_cast(codepoint) >> 12u) & 0x3Fu)));
                                add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu)));
                                add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu)));
                            }

                            break;
                        }

                        // other characters after escape
                        default:
                            error_message = "invalid string: forbidden character after backslash";
                            return token_type::parse_error;
                    }

                    break;
                }

                // invalid control characters
                case 0x00:
                {
                    error_message = "invalid string: control character U+0000 (NUL) must be escaped to \\u0000";
                    return token_type::parse_error;
                }

                case 0x01:
                {
                    error_message = "invalid string: control character U+0001 (SOH) must be escaped to \\u0001";
                    return token_type::parse_error;
                }

                case 0x02:
                {
                    error_message = "invalid string: control character U+0002 (STX) must be escaped to \\u0002";
                    return token_type::parse_error;
                }

                case 0x03:
                {
                    error_message = "invalid string: control character U+0003 (ETX) must be escaped to \\u0003";
                    return token_type::parse_error;
                }

                case 0x04:
                {
                    error_message = "invalid string: control character U+0004 (EOT) must be escaped to \\u0004";
                    return token_type::parse_error;
                }

                case 0x05:
                {
                    error_message = "invalid string: control character U+0005 (ENQ) must be escaped to \\u0005";
                    return token_type::parse_error;
                }

                case 0x06:
                {
                    error_message = "invalid string: control character U+0006 (ACK) must be escaped to \\u0006";
                    return token_type::parse_error;
                }

                case 0x07:
                {
                    error_message = "invalid string: control character U+0007 (BEL) must be escaped to \\u0007";
                    return token_type::parse_error;
                }

                case 0x08:
                {
                    error_message = "invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b";
                    return token_type::parse_error;
                }

                case 0x09:
                {
                    error_message = "invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t";
                    return token_type::parse_error;
                }

                case 0x0A:
                {
                    error_message = "invalid string: control character U+000A (LF) must be escaped to \\u000A or \\n";
                    return token_type::parse_error;
                }

                case 0x0B:
                {
                    error_message = "invalid string: control character U+000B (VT) must be escaped to \\u000B";
                    return token_type::parse_error;
                }

                case 0x0C:
                {
                    error_message = "invalid string: control character U+000C (FF) must be escaped to \\u000C or \\f";
                    return token_type::parse_error;
                }

                case 0x0D:
                {
                    error_message = "invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r";
                    return token_type::parse_error;
                }

                case 0x0E:
                {
                    error_message = "invalid string: control character U+000E (SO) must be escaped to \\u000E";
                    return token_type::parse_error;
                }

                case 0x0F:
                {
                    error_message = "invalid string: control character U+000F (SI) must be escaped to \\u000F";
                    return token_type::parse_error;
                }

                case 0x10:
                {
                    error_message = "invalid string: control character U+0010 (DLE) must be escaped to \\u0010";
                    return token_type::parse_error;
                }

                case 0x11:
                {
                    error_message = "invalid string: control character U+0011 (DC1) must be escaped to \\u0011";
                    return token_type::parse_error;
                }

                case 0x12:
                {
                    error_message = "invalid string: control character U+0012 (DC2) must be escaped to \\u0012";
                    return token_type::parse_error;
                }

                case 0x13:
                {
                    error_message = "invalid string: control character U+0013 (DC3) must be escaped to \\u0013";
                    return token_type::parse_error;
                }

                case 0x14:
                {
                    error_message = "invalid string: control character U+0014 (DC4) must be escaped to \\u0014";
                    return token_type::parse_error;
                }

                case 0x15:
                {
                    error_message = "invalid string: control character U+0015 (NAK) must be escaped to \\u0015";
                    return token_type::parse_error;
                }

                case 0x16:
                {
                    error_message = "invalid string: control character U+0016 (SYN) must be escaped to \\u0016";
                    return token_type::parse_error;
                }

                case 0x17:
                {
                    error_message = "invalid string: control character U+0017 (ETB) must be escaped to \\u0017";
                    return token_type::parse_error;
                }

                case 0x18:
                {
                    error_message = "invalid string: control character U+0018 (CAN) must be escaped to \\u0018";
                    return token_type::parse_error;
                }

                case 0x19:
                {
                    error_message = "invalid string: control character U+0019 (EM) must be escaped to \\u0019";
                    return token_type::parse_error;
                }

                case 0x1A:
                {
                    error_message = "invalid string: control character U+001A (SUB) must be escaped to \\u001A";
                    return token_type::parse_error;
                }

                case 0x1B:
                {
                    error_message = "invalid string: control character U+001B (ESC) must be escaped to \\u001B";
                    return token_type::parse_error;
                }

                case 0x1C:
                {
                    error_message = "invalid string: control character U+001C (FS) must be escaped to \\u001C";
                    return token_type::parse_error;
                }

                case 0x1D:
                {
                    error_message = "invalid string: control character U+001D (GS) must be escaped to \\u001D";
                    return token_type::parse_error;
                }

                case 0x1E:
                {
                    error_message = "invalid string: control character U+001E (RS) must be escaped to \\u001E";
                    return token_type::parse_error;
                }

                case 0x1F:
                {
                    error_message = "invalid string: control character U+001F (US) must be escaped to \\u001F";
                    return token_type::parse_error;
                }

                // U+0020..U+007F (except U+0022 (quote) and U+005C (backspace))
                case 0x20:
                case 0x21:
                case 0x23:
                case 0x24:
                case 0x25:
                case 0x26:
                case 0x27:
                case 0x28:
                case 0x29:
                case 0x2A:
                case 0x2B:
                case 0x2C:
                case 0x2D:
                case 0x2E:
                case 0x2F:
                case 0x30:
                case 0x31:
                case 0x32:
                case 0x33:
                case 0x34:
                case 0x35:
                case 0x36:
                case 0x37:
                case 0x38:
                case 0x39:
                case 0x3A:
                case 0x3B:
                case 0x3C:
                case 0x3D:
                case 0x3E:
                case 0x3F:
                case 0x40:
                case 0x41:
                case 0x42:
                case 0x43:
                case 0x44:
                case 0x45:
                case 0x46:
                case 0x47:
                case 0x48:
                case 0x49:
                case 0x4A:
                case 0x4B:
                case 0x4C:
                case 0x4D:
                case 0x4E:
                case 0x4F:
                case 0x50:
                case 0x51:
                case 0x52:
                case 0x53:
                case 0x54:
                case 0x55:
                case 0x56:
                case 0x57:
                case 0x58:
                case 0x59:
                case 0x5A:
                case 0x5B:
                case 0x5D:
                case 0x5E:
                case 0x5F:
                case 0x60:
                case 0x61:
                case 0x62:
                case 0x63:
                case 0x64:
                case 0x65:
                case 0x66:
                case 0x67:
                case 0x68:
                case 0x69:
                case 0x6A:
                case 0x6B:
                case 0x6C:
                case 0x6D:
                case 0x6E:
                case 0x6F:
                case 0x70:
                case 0x71:
                case 0x72:
                case 0x73:
                case 0x74:
                case 0x75:
                case 0x76:
                case 0x77:
                case 0x78:
                case 0x79:
                case 0x7A:
                case 0x7B:
                case 0x7C:
                case 0x7D:
                case 0x7E:
                case 0x7F:
                {
                    add(current);
                    break;
                }

                // U+0080..U+07FF: bytes C2..DF 80..BF
                case 0xC2:
                case 0xC3:
                case 0xC4:
                case 0xC5:
                case 0xC6:
                case 0xC7:
                case 0xC8:
                case 0xC9:
                case 0xCA:
                case 0xCB:
                case 0xCC:
                case 0xCD:
                case 0xCE:
                case 0xCF:
                case 0xD0:
                case 0xD1:
                case 0xD2:
                case 0xD3:
                case 0xD4:
                case 0xD5:
                case 0xD6:
                case 0xD7:
                case 0xD8:
                case 0xD9:
                case 0xDA:
                case 0xDB:
                case 0xDC:
                case 0xDD:
                case 0xDE:
                case 0xDF:
                {
                    if (JSON_HEDLEY_UNLIKELY(!next_byte_in_range({0x80, 0xBF})))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+0800..U+0FFF: bytes E0 A0..BF 80..BF
                case 0xE0:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF
                // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF
                case 0xE1:
                case 0xE2:
                case 0xE3:
                case 0xE4:
                case 0xE5:
                case 0xE6:
                case 0xE7:
                case 0xE8:
                case 0xE9:
                case 0xEA:
                case 0xEB:
                case 0xEC:
                case 0xEE:
                case 0xEF:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+D000..U+D7FF: bytes ED 80..9F 80..BF
                case 0xED:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x9F, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+10000..U+3FFFF F0 90..BF 80..BF 80..BF
                case 0xF0:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+40000..U+FFFFF F1..F3 80..BF 80..BF 80..BF
                case 0xF1:
                case 0xF2:
                case 0xF3:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF
                case 0xF4:
                {
                    if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF}))))
                    {
                        return token_type::parse_error;
                    }
                    break;
                }

                // remaining bytes (80..C1 and F5..FF) are ill-formed
                default:
                {
                    error_message = "invalid string: ill-formed UTF-8 byte";
                    return token_type::parse_error;
                }
            }
        }
    }

    /*!
     * @brief scan a comment
     * @return whether comment could be scanned successfully
     */
    bool scan_comment()
    {
        switch (get())
        {
            // single-line comments skip input until a newline or EOF is read
            case '/':
            {
                while (true)
                {
                    switch (get())
                    {
                        case '\n':
                        case '\r':
                        case std::char_traits::eof():
                        case '\0':
                            return true;

                        default:
                            break;
                    }
                }
            }

            // multi-line comments skip input until */ is read
            case '*':
            {
                while (true)
                {
                    switch (get())
                    {
                        case std::char_traits::eof():
                        case '\0':
                        {
                            error_message = "invalid comment; missing closing '*/'";
                            return false;
                        }

                        case '*':
                        {
                            switch (get())
                            {
                                case '/':
                                    return true;

                                default:
                                {
                                    unget();
                                    continue;
                                }
                            }
                        }

                        default:
                            continue;
                    }
                }
            }

            // unexpected character after reading '/'
            default:
            {
                error_message = "invalid comment; expecting '/' or '*' after '/'";
                return false;
            }
        }
    }

    // dispatch std::strtof/strtod/strtold by the target float type so
    // scan_number() can call a single name for number_float_t
    JSON_HEDLEY_NON_NULL(2)
    static void strtof(float& f, const char* str, char** endptr) noexcept
    {
        f = std::strtof(str, endptr);
    }

    JSON_HEDLEY_NON_NULL(2)
    static void strtof(double& f, const char* str, char**
endptr) noexcept
    {
        f = std::strtod(str, endptr);
    }

    JSON_HEDLEY_NON_NULL(2)
    static void strtof(long double& f, const char* str, char** endptr) noexcept
    {
        f = std::strtold(str, endptr);
    }

    /*!
    @brief scan a number literal

    This function scans a string according to Sect. 6 of RFC 7159.

    The function is realized with a deterministic finite state machine derived
    from the grammar described in RFC 7159. Starting in state "init", the
    input is read and used to determined the next state. Only state "done"
    accepts the number. State "error" is a trap state to model errors. In the
    table below, "anything" means any character but the ones listed before.

    state    | 0        | 1-9      | e E      | +       | -       | .        | anything
    ---------|----------|----------|----------|---------|---------|----------|-----------
    init     | zero     | any1     | [error]  | [error] | minus   | [error]  | [error]
    minus    | zero     | any1     | [error]  | [error] | [error] | [error]  | [error]
    zero     | done     | done     | exponent | done    | done    | decimal1 | done
    any1     | any1     | any1     | exponent | done    | done    | decimal1 | done
    decimal1 | decimal2 | decimal2 | [error]  | [error] | [error] | [error]  | [error]
    decimal2 | decimal2 | decimal2 | exponent | done    | done    | done     | done
    exponent | any2     | any2     | [error]  | sign    | sign    | [error]  | [error]
    sign     | any2     | any2     | [error]  | [error] | [error] | [error]  | [error]
    any2     | any2     | any2     | done     | done    | done    | done     | done

    The state machine is realized with one label per state (prefixed with
    "scan_number_") and `goto` statements between them. The state machine
    contains cycles, but any cycle can be left when EOF is read. Therefore,
    the function is guaranteed to terminate.

    During scanning, the read bytes are stored in token_buffer. This string is
    then converted to a signed integer, an unsigned integer, or a
    floating-point number.

    @return token_type::value_unsigned, token_type::value_integer, or
            token_type::value_float if number could be successfully scanned,
            token_type::parse_error otherwise

    @note The scanner is independent of the current locale. Internally, the
          locale's decimal point is used instead of `.` to work with the
          locale-dependent converters.
    */
    token_type scan_number()  // lgtm [cpp/use-of-goto]
    {
        // reset token_buffer to store the number's bytes
        reset();

        // the type of the parsed number; initially set to unsigned; will be
        // changed if minus sign, decimal point or exponent is read
        token_type number_type = token_type::value_unsigned;

        // state (init): we just found out we need to scan a number
        switch (current)
        {
            case '-':
            {
                add(current);
                goto scan_number_minus;
            }

            case '0':
            {
                add(current);
                goto scan_number_zero;
            }

            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any1;
            }

            // all other characters are rejected outside scan_number()
            default:            // LCOV_EXCL_LINE
                JSON_ASSERT(false);  // LCOV_EXCL_LINE
        }

scan_number_minus:
        // state: we just parsed a leading minus sign
        number_type = token_type::value_integer;
        switch (get())
        {
            case '0':
            {
                add(current);
                goto scan_number_zero;
            }

            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any1;
            }

            default:
            {
                error_message = "invalid number; expected digit after '-'";
                return token_type::parse_error;
            }
        }

scan_number_zero:
        // state: we just parse a zero (maybe with a leading minus sign)
        switch (get())
        {
            case '.':
            {
                add(decimal_point_char);
                goto scan_number_decimal1;
            }

            case 'e':
            case 'E':
            {
                add(current);
                goto scan_number_exponent;
            }

            default:
                goto scan_number_done;
        }

scan_number_any1:
        // state: we just parsed a number 0-9 (maybe with a leading minus sign)
        switch (get())
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any1;
            }

            case '.':
            {
                add(decimal_point_char);
                goto scan_number_decimal1;
            }

            case 'e':
            case 'E':
            {
                add(current);
                goto scan_number_exponent;
            }

            default:
                goto scan_number_done;
        }

scan_number_decimal1:
        // state: we just parsed a decimal point
        number_type = token_type::value_float;
        switch (get())
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_decimal2;
            }

            default:
            {
                error_message = "invalid number; expected digit after '.'";
                return token_type::parse_error;
            }
        }

scan_number_decimal2:
        // we just parsed at least one number after a decimal point
        switch (get())
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_decimal2;
            }

            case 'e':
            case 'E':
            {
                add(current);
                goto scan_number_exponent;
            }

            default:
                goto scan_number_done;
        }

scan_number_exponent:
        // we just parsed an exponent
        number_type = token_type::value_float;
        switch (get())
        {
            case '+':
            case '-':
            {
                add(current);
                goto scan_number_sign;
            }

            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any2;
            }

            default:
            {
                error_message =
                    "invalid number; expected '+', '-', or digit after exponent";
                return token_type::parse_error;
            }
        }

scan_number_sign:
        // we just parsed an exponent sign
        switch (get())
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any2;
            }

            default:
            {
                error_message = "invalid number; expected digit after exponent sign";
                return token_type::parse_error;
            }
        }

scan_number_any2:
        // we just parsed a number after the exponent or exponent sign
        switch (get())
        {
            case '0':
            case '1':
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
            {
                add(current);
                goto scan_number_any2;
            }

            default:
                goto scan_number_done;
        }

scan_number_done:
        // unget the character after the number (we only read it to know that
        // we are done scanning a number)
        unget();

        char* endptr = nullptr;
        errno = 0;

        // try to parse integers first and fall back to floats
        if (number_type == token_type::value_unsigned)
        {
            const auto x = std::strtoull(token_buffer.data(), &endptr, 10);

            // we checked the number format before
            JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

            if (errno == 0)
            {
                // NOTE(review): '<number_unsigned_t>' template argument of
                // static_cast stripped in extraction (same for the signed
                // branch below).
                value_unsigned = static_cast(x);
                if (value_unsigned == x)
                {
                    return token_type::value_unsigned;
                }
            }
        }
        else if (number_type == token_type::value_integer)
        {
            const auto x = std::strtoll(token_buffer.data(), &endptr, 10);

            // we checked the number format before
            JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

            if (errno == 0)
            {
                value_integer = static_cast(x);
                if (value_integer == x)
                {
                    return token_type::value_integer;
                }
            }
        }

        // this code is reached if we parse a floating-point number or if an
        // integer conversion above failed
        strtof(value_float, token_buffer.data(), &endptr);

        // we checked the number format before
        JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());

        return token_type::value_float;
    }

    /*!
    @param[in] literal_text  the literal text to expect
    @param[in] length        the length of the passed literal text
    @param[in] return_type   the token type to return on success
    */
    JSON_HEDLEY_NON_NULL(2)
    token_type scan_literal(const char_type* literal_text, const std::size_t length,
                            token_type return_type)
    {
        // NOTE(review): '<char_type>' argument of std::char_traits stripped
        // in extraction here and in reset()/get()/get_token_string() below.
        JSON_ASSERT(std::char_traits::to_char_type(current) == literal_text[0]);
        for (std::size_t i = 1; i < length; ++i)
        {
            if (JSON_HEDLEY_UNLIKELY(std::char_traits::to_char_type(get()) != literal_text[i]))
            {
                error_message = "invalid literal";
                return token_type::parse_error;
            }
        }
        return return_type;
    }

    /////////////////////
    // input management
    /////////////////////

    /// reset token_buffer; current character is beginning of token
    void reset() noexcept
    {
        token_buffer.clear();
        token_string.clear();
        token_string.push_back(std::char_traits::to_char_type(current));
    }

    /*
    @brief get next character from the input

    This function provides the interface to the used input adapter. It does
    not throw in case the input reached EOF, but returns a
    `std::char_traits::eof()` in that case. Stores the scanned characters
    for use in error messages.

    @return character read from the input
    */
    char_int_type get()
    {
        ++position.chars_read_total;
        ++position.chars_read_current_line;

        if (next_unget)
        {
            // just reset the next_unget variable and work with current
            next_unget = false;
        }
        else
        {
            current = ia.get_character();
        }

        if (JSON_HEDLEY_LIKELY(current != std::char_traits::eof()))
        {
            token_string.push_back(std::char_traits::to_char_type(current));
        }

        if (current == '\n')
        {
            ++position.lines_read;
            position.chars_read_current_line = 0;
        }

        return current;
    }

    /*!
    @brief unget current character (read it again on next get)

    We implement unget by setting variable next_unget to true. The input is not
    changed - we just simulate ungetting by modifying chars_read_total,
    chars_read_current_line, and token_string. The next call to get() will
    behave as if the unget character is read again.
    */
    void unget()
    {
        next_unget = true;

        --position.chars_read_total;

        // in case we "unget" a newline, we have to also decrement the lines_read
        if (position.chars_read_current_line == 0)
        {
            if (position.lines_read > 0)
            {
                --position.lines_read;
            }
        }
        else
        {
            --position.chars_read_current_line;
        }

        if (JSON_HEDLEY_LIKELY(current != std::char_traits::eof()))
        {
            JSON_ASSERT(!token_string.empty());
            token_string.pop_back();
        }
    }

    /// add a character to token_buffer
    void add(char_int_type c)
    {
        token_buffer.push_back(static_cast(c));
    }

  public:
    /////////////////////
    // value getters
    /////////////////////

    /// return integer value
    constexpr number_integer_t get_number_integer() const noexcept
    {
        return value_integer;
    }

    /// return unsigned integer value
    constexpr number_unsigned_t get_number_unsigned() const noexcept
    {
        return value_unsigned;
    }

    /// return floating-point value
    constexpr number_float_t get_number_float() const noexcept
    {
        return value_float;
    }

    /// return current string value (implicitly resets the token; useful only once)
    string_t& get_string()
    {
        return token_buffer;
    }

    /////////////////////
    // diagnostics
    /////////////////////

    /// return position of last read token
    constexpr position_t get_position() const noexcept
    {
        return position;
    }

    /// return the last read token (for errors only). Will never contain EOF
    /// (an arbitrary value that is not a valid char value, often -1), because
    /// 255 may legitimately occur. May contain NUL, which should be escaped.
    std::string get_token_string() const
    {
        // escape control characters
        std::string result;
        for (const auto c : token_string)
        {
            if (static_cast(c) <= '\x1F')
            {
                // escape control characters
                // NOTE(review): the snprintf format literal is empty here —
                // upstream nlohmann/json uses "<U+%.4X>", and std::array has
                // lost its '<char, 9>' arguments; both look like the same
                // angle-bracket stripping. Cannot repair runtime strings in a
                // comment-only edit.
                std::array cs{{}};
                (std::snprintf)(cs.data(), cs.size(), "", static_cast(c));
                result += cs.data();
            }
            else
            {
                // add character as is
                result.push_back(static_cast(c));
            }
        }

        return result;
    }

    /// return syntax error message
    JSON_HEDLEY_RETURNS_NON_NULL
    constexpr const char* get_error_message() const noexcept
    {
        return error_message;
    }

    /////////////////////
    // actual scanner
    /////////////////////

    /*!
    @brief skip the UTF-8 byte order mark
    @return true iff there is no BOM or the correct BOM has been skipped
    */
    bool skip_bom()
    {
        if (get() == 0xEF)
        {
            // check if we completely parse the BOM
            return get() == 0xBB && get() == 0xBF;
        }

        // the first character is not the beginning of the BOM; unget it to
        // process it later
        unget();
        return true;
    }

    void skip_whitespace()
    {
        do
        {
            get();
        }
        while (current == ' ' || current == '\t' || current == '\n' || current == '\r');
    }

    token_type scan()
    {
        // initially, skip the BOM
        if (position.chars_read_total == 0 && !skip_bom())
        {
            error_message = "invalid BOM; must be 0xEF 0xBB 0xBF if given";
            return token_type::parse_error;
        }

        // read next character and ignore whitespace
        skip_whitespace();

        // ignore comments
        while (ignore_comments && current == '/')
        {
            if (!scan_comment())
            {
                return token_type::parse_error;
            }

            // skip following whitespace
            skip_whitespace();
        }

        switch (current)
        {
            // structural characters
            case '[':
                return token_type::begin_array;
            case ']':
                return token_type::end_array;
            case '{':
                return token_type::begin_object;
            case '}':
                return token_type::end_object;
            case ':':
                return token_type::name_separator;
            case ',':
                return token_type::value_separator;

            // literals
            // NOTE(review): scan() continues beyond this chunk boundary.
case 't': + { + std::array true_literal = {{char_type('t'), char_type('r'), char_type('u'), char_type('e')}}; + return scan_literal(true_literal.data(), true_literal.size(), token_type::literal_true); + } + case 'f': + { + std::array false_literal = {{char_type('f'), char_type('a'), char_type('l'), char_type('s'), char_type('e')}}; + return scan_literal(false_literal.data(), false_literal.size(), token_type::literal_false); + } + case 'n': + { + std::array null_literal = {{char_type('n'), char_type('u'), char_type('l'), char_type('l')}}; + return scan_literal(null_literal.data(), null_literal.size(), token_type::literal_null); + } + + // string + case '\"': + return scan_string(); + + // number + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + return scan_number(); + + // end of input (the null byte is needed when parsing from + // string literals) + case '\0': + case std::char_traits::eof(): + return token_type::end_of_input; + + // error + default: + error_message = "invalid literal"; + return token_type::parse_error; + } + } + + private: + /// input adapter + InputAdapterType ia; + + /// whether comments should be ignored (true) or signaled as errors (false) + const bool ignore_comments = false; + + /// the current character + char_int_type current = std::char_traits::eof(); + + /// whether the next get() call should just return current + bool next_unget = false; + + /// the start position of the current token + position_t position {}; + + /// raw input token string (for error messages) + std::vector token_string {}; + + /// buffer for variable-length tokens (numbers, strings) + string_t token_buffer {}; + + /// a description of occurred lexer errors + const char* error_message = ""; + + // number values + number_integer_t value_integer = 0; + number_unsigned_t value_unsigned = 0; + number_float_t value_float = 0; + + /// the decimal point + const char_int_type decimal_point_char 
= '.'; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // size_t +#include // declval +#include // string + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +using null_function_t = decltype(std::declval().null()); + +template +using boolean_function_t = + decltype(std::declval().boolean(std::declval())); + +template +using number_integer_function_t = + decltype(std::declval().number_integer(std::declval())); + +template +using number_unsigned_function_t = + decltype(std::declval().number_unsigned(std::declval())); + +template +using number_float_function_t = decltype(std::declval().number_float( + std::declval(), std::declval())); + +template +using string_function_t = + decltype(std::declval().string(std::declval())); + +template +using binary_function_t = + decltype(std::declval().binary(std::declval())); + +template +using start_object_function_t = + decltype(std::declval().start_object(std::declval())); + +template +using key_function_t = + decltype(std::declval().key(std::declval())); + +template +using end_object_function_t = decltype(std::declval().end_object()); + +template +using start_array_function_t = + decltype(std::declval().start_array(std::declval())); + +template +using end_array_function_t = decltype(std::declval().end_array()); + +template +using parse_error_function_t = decltype(std::declval().parse_error( + std::declval(), std::declval(), + std::declval())); + +template +struct is_sax +{ + private: + static_assert(is_basic_json::value, + "BasicJsonType must be of type basic_json<...>"); + + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using exception_t = typename BasicJsonType::exception; + + 
public: + static constexpr bool value = + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value && + is_detected_exact::value; +}; + +template +struct is_sax_static_asserts +{ + private: + static_assert(is_basic_json::value, + "BasicJsonType must be of type basic_json<...>"); + + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using exception_t = typename BasicJsonType::exception; + + public: + static_assert(is_detected_exact::value, + "Missing/invalid function: bool null()"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool boolean(bool)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool boolean(bool)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool number_integer(number_integer_t)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool number_unsigned(number_unsigned_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool number_float(number_float_t, const string_t&)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool string(string_t&)"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool binary(binary_t&)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool start_object(std::size_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool key(string_t&)"); + 
static_assert(is_detected_exact::value, + "Missing/invalid function: bool end_object()"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool start_array(std::size_t)"); + static_assert(is_detected_exact::value, + "Missing/invalid function: bool end_array()"); + static_assert( + is_detected_exact::value, + "Missing/invalid function: bool parse_error(std::size_t, const " + "std::string&, const exception&)"); +}; +} // namespace detail +} // namespace nlohmann + +// #include + + +namespace nlohmann +{ +namespace detail +{ + +/// how to treat CBOR tags +enum class cbor_tag_handler_t +{ + error, ///< throw a parse_error exception in case of a tag + ignore ///< ignore tags +}; + +/*! +@brief determine system byte order + +@return true if and only if system's byte order is little endian + +@note from https://stackoverflow.com/a/1001328/266378 +*/ +static inline bool little_endianess(int num = 1) noexcept +{ + return *reinterpret_cast(&num) == 1; +} + + +/////////////////// +// binary reader // +/////////////////// + +/*! +@brief deserialization of CBOR, MessagePack, and UBJSON values +*/ +template> +class binary_reader +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using json_sax_t = SAX; + using char_type = typename InputAdapterType::char_type; + using char_int_type = typename std::char_traits::int_type; + + public: + /*! 
+ @brief create a binary reader + + @param[in] adapter input adapter to read from + */ + explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter)) + { + (void)detail::is_sax_static_asserts {}; + } + + // make class move-only + binary_reader(const binary_reader&) = delete; + binary_reader(binary_reader&&) = default; + binary_reader& operator=(const binary_reader&) = delete; + binary_reader& operator=(binary_reader&&) = default; + ~binary_reader() = default; + + /*! + @param[in] format the binary format to parse + @param[in] sax_ a SAX event processor + @param[in] strict whether to expect the input to be consumed completed + @param[in] tag_handler how to treat CBOR tags + + @return + */ + JSON_HEDLEY_NON_NULL(3) + bool sax_parse(const input_format_t format, + json_sax_t* sax_, + const bool strict = true, + const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) + { + sax = sax_; + bool result = false; + + switch (format) + { + case input_format_t::bson: + result = parse_bson_internal(); + break; + + case input_format_t::cbor: + result = parse_cbor_internal(true, tag_handler); + break; + + case input_format_t::msgpack: + result = parse_msgpack_internal(); + break; + + case input_format_t::ubjson: + result = parse_ubjson_internal(); + break; + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + + // strict mode: next byte must be EOF + if (result && strict) + { + if (format == input_format_t::ubjson) + { + get_ignore_noop(); + } + else + { + get(); + } + + if (JSON_HEDLEY_UNLIKELY(current != std::char_traits::eof())) + { + return sax->parse_error(chars_read, get_token_string(), + parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value"), BasicJsonType())); + } + } + + return result; + } + + private: + ////////// + // BSON // + ////////// + + /*! + @brief Reads in a BSON-object and passes it to the SAX-parser. 
+ @return whether a valid BSON-value was passed to the SAX parser + */ + bool parse_bson_internal() + { + std::int32_t document_size{}; + get_number(input_format_t::bson, document_size); + + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/false))) + { + return false; + } + + return sax->end_object(); + } + + /*! + @brief Parses a C-style string from the BSON input. + @param[in, out] result A reference to the string variable where the read + string is to be stored. + @return `true` if the \x00-byte indicating the end of the string was + encountered before the EOF; false` indicates an unexpected EOF. + */ + bool get_bson_cstr(string_t& result) + { + auto out = std::back_inserter(result); + while (true) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "cstring"))) + { + return false; + } + if (current == 0x00) + { + return true; + } + *out++ = static_cast(current); + } + } + + /*! + @brief Parses a zero-terminated string of length @a len from the BSON + input. + @param[in] len The length (including the zero-byte at the end) of the + string to be read. + @param[in, out] result A reference to the string variable where the read + string is to be stored. + @tparam NumberType The type of the length @a len + @pre len >= 1 + @return `true` if the string was successfully parsed + */ + template + bool get_bson_string(const NumberType len, string_t& result) + { + if (JSON_HEDLEY_UNLIKELY(len < 1)) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string"), BasicJsonType())); + } + + return get_string(input_format_t::bson, len - static_cast(1), result) && get() != std::char_traits::eof(); + } + + /*! 
+ @brief Parses a byte array input of length @a len from the BSON input. + @param[in] len The length of the byte array to be read. + @param[in, out] result A reference to the binary variable where the read + array is to be stored. + @tparam NumberType The type of the length @a len + @pre len >= 0 + @return `true` if the byte array was successfully parsed + */ + template + bool get_bson_binary(const NumberType len, binary_t& result) + { + if (JSON_HEDLEY_UNLIKELY(len < 0)) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "byte array length cannot be negative, is " + std::to_string(len), "binary"), BasicJsonType())); + } + + // All BSON binary values have a subtype + std::uint8_t subtype{}; + get_number(input_format_t::bson, subtype); + result.set_subtype(subtype); + + return get_binary(input_format_t::bson, len, result); + } + + /*! + @brief Read a BSON document element of the given @a element_type. + @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html + @param[in] element_type_parse_position The position in the input stream, + where the `element_type` was read. + @warning Not all BSON element types are supported yet. An unsupported + @a element_type will give rise to a parse_error.114: + Unsupported BSON record type 0x... 
+ @return whether a valid BSON-object/array was passed to the SAX parser + */ + bool parse_bson_element_internal(const char_int_type element_type, + const std::size_t element_type_parse_position) + { + switch (element_type) + { + case 0x01: // double + { + double number{}; + return get_number(input_format_t::bson, number) && sax->number_float(static_cast(number), ""); + } + + case 0x02: // string + { + std::int32_t len{}; + string_t value; + return get_number(input_format_t::bson, len) && get_bson_string(len, value) && sax->string(value); + } + + case 0x03: // object + { + return parse_bson_internal(); + } + + case 0x04: // array + { + return parse_bson_array(); + } + + case 0x05: // binary + { + std::int32_t len{}; + binary_t value; + return get_number(input_format_t::bson, len) && get_bson_binary(len, value) && sax->binary(value); + } + + case 0x08: // boolean + { + return sax->boolean(get() != 0); + } + + case 0x0A: // null + { + return sax->null(); + } + + case 0x10: // int32 + { + std::int32_t value{}; + return get_number(input_format_t::bson, value) && sax->number_integer(value); + } + + case 0x12: // int64 + { + std::int64_t value{}; + return get_number(input_format_t::bson, value) && sax->number_integer(value); + } + + default: // anything else not supported (yet) + { + std::array cr{{}}; + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); + return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data()), BasicJsonType())); + } + } + } + + /*! + @brief Read a BSON element list (as specified in the BSON-spec) + + The same binary layout is used for objects and arrays, hence it must be + indicated with the argument @a is_array which one is expected + (true --> array, false --> object). 
+ + @param[in] is_array Determines if the element list being read is to be + treated as an object (@a is_array == false), or as an + array (@a is_array == true). + @return whether a valid BSON-object/array was passed to the SAX parser + */ + bool parse_bson_element_list(const bool is_array) + { + string_t key; + + while (auto element_type = get()) + { + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "element list"))) + { + return false; + } + + const std::size_t element_type_parse_position = chars_read; + if (JSON_HEDLEY_UNLIKELY(!get_bson_cstr(key))) + { + return false; + } + + if (!is_array && !sax->key(key)) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_internal(element_type, element_type_parse_position))) + { + return false; + } + + // get_bson_cstr only appends + key.clear(); + } + + return true; + } + + /*! + @brief Reads an array from the BSON input and passes it to the SAX-parser. + @return whether a valid BSON-array was passed to the SAX parser + */ + bool parse_bson_array() + { + std::int32_t document_size{}; + get_number(input_format_t::bson, document_size); + + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/true))) + { + return false; + } + + return sax->end_array(); + } + + ////////// + // CBOR // + ////////// + + /*! + @param[in] get_char whether a new character should be retrieved from the + input (true) or whether the last read character should + be considered instead (false) + @param[in] tag_handler how CBOR tags should be treated + + @return whether a valid CBOR value was passed to the SAX parser + */ + bool parse_cbor_internal(const bool get_char, + const cbor_tag_handler_t tag_handler) + { + switch (get_char ? 
get() : current) + { + // EOF + case std::char_traits::eof(): + return unexpect_eof(input_format_t::cbor, "value"); + + // Integer 0x00..0x17 (0..23) + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + return sax->number_unsigned(static_cast(current)); + + case 0x18: // Unsigned integer (one-byte uint8_t follows) + { + std::uint8_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); + } + + case 0x19: // Unsigned integer (two-byte uint16_t follows) + { + std::uint16_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); + } + + case 0x1A: // Unsigned integer (four-byte uint32_t follows) + { + std::uint32_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); + } + + case 0x1B: // Unsigned integer (eight-byte uint64_t follows) + { + std::uint64_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); + } + + // Negative integer -1-0x00..-1-0x17 (-1..-24) + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + return sax->number_integer(static_cast(0x20 - 1 - current)); + + case 0x38: // Negative integer (one-byte uint8_t follows) + { + std::uint8_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) - number); + } + + case 0x39: // Negative integer -1-n (two-byte uint16_t follows) + { + std::uint16_t number{}; + return get_number(input_format_t::cbor, 
number) && sax->number_integer(static_cast(-1) - number); + } + + case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) + { + std::uint32_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) - number); + } + + case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) + { + std::uint64_t number{}; + return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) + - static_cast(number)); + } + + // Binary data (0x00..0x17 bytes follow) + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: // Binary data (one-byte uint8_t for n follows) + case 0x59: // Binary data (two-byte uint16_t for n follow) + case 0x5A: // Binary data (four-byte uint32_t for n follow) + case 0x5B: // Binary data (eight-byte uint64_t for n follow) + case 0x5F: // Binary data (indefinite length) + { + binary_t b; + return get_cbor_binary(b) && sax->binary(b); + } + + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + case 0x7F: // UTF-8 string (indefinite length) + { + string_t s; + return get_cbor_string(s) && sax->string(s); + } + + // array (0x00..0x17 data items follow) + case 0x80: + case 
0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: + return get_cbor_array(static_cast(static_cast(current) & 0x1Fu), tag_handler); + + case 0x98: // array (one-byte uint8_t for n follows) + { + std::uint8_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); + } + + case 0x99: // array (two-byte uint16_t for n follow) + { + std::uint16_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); + } + + case 0x9A: // array (four-byte uint32_t for n follow) + { + std::uint32_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); + } + + case 0x9B: // array (eight-byte uint64_t for n follow) + { + std::uint64_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); + } + + case 0x9F: // array (indefinite length) + return get_cbor_array(std::size_t(-1), tag_handler); + + // map (0x00..0x17 pairs of data items follow) + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + return get_cbor_object(static_cast(static_cast(current) & 0x1Fu), tag_handler); + + case 0xB8: // map (one-byte uint8_t for n follows) + { + std::uint8_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); + } + + case 0xB9: // map (two-byte uint16_t for n follow) + { + std::uint16_t len{}; + return get_number(input_format_t::cbor, len) && 
get_cbor_object(static_cast(len), tag_handler); + } + + case 0xBA: // map (four-byte uint32_t for n follow) + { + std::uint32_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); + } + + case 0xBB: // map (eight-byte uint64_t for n follow) + { + std::uint64_t len{}; + return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); + } + + case 0xBF: // map (indefinite length) + return get_cbor_object(std::size_t(-1), tag_handler); + + case 0xC6: // tagged item + case 0xC7: + case 0xC8: + case 0xC9: + case 0xCA: + case 0xCB: + case 0xCC: + case 0xCD: + case 0xCE: + case 0xCF: + case 0xD0: + case 0xD1: + case 0xD2: + case 0xD3: + case 0xD4: + case 0xD8: // tagged item (1 bytes follow) + case 0xD9: // tagged item (2 bytes follow) + case 0xDA: // tagged item (4 bytes follow) + case 0xDB: // tagged item (8 bytes follow) + { + switch (tag_handler) + { + case cbor_tag_handler_t::error: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); + } + + case cbor_tag_handler_t::ignore: + { + switch (current) + { + case 0xD8: + { + std::uint8_t len{}; + get_number(input_format_t::cbor, len); + break; + } + case 0xD9: + { + std::uint16_t len{}; + get_number(input_format_t::cbor, len); + break; + } + case 0xDA: + { + std::uint32_t len{}; + get_number(input_format_t::cbor, len); + break; + } + case 0xDB: + { + std::uint64_t len{}; + get_number(input_format_t::cbor, len); + break; + } + default: + break; + } + return parse_cbor_internal(true, tag_handler); + } + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + return false; // LCOV_EXCL_LINE + } + } + + case 0xF4: // false + return sax->boolean(false); + + case 0xF5: // true + return sax->boolean(true); + + case 0xF6: // null + return sax->null(); + + 
case 0xF9: // Half-Precision Float (two-byte IEEE 754) + { + const auto byte1_raw = get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number"))) + { + return false; + } + const auto byte2_raw = get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number"))) + { + return false; + } + + const auto byte1 = static_cast(byte1_raw); + const auto byte2 = static_cast(byte2_raw); + + // code from RFC 7049, Appendix D, Figure 3: + // As half-precision floating-point numbers were only added + // to IEEE 754 in 2008, today's programming platforms often + // still only have limited support for them. It is very + // easy to include at least decoding support for them even + // without such support. An example of a small decoder for + // half-precision floating-point numbers in the C language + // is shown in Fig. 3. + const auto half = static_cast((byte1 << 8u) + byte2); + const double val = [&half] + { + const int exp = (half >> 10u) & 0x1Fu; + const unsigned int mant = half & 0x3FFu; + JSON_ASSERT(0 <= exp&& exp <= 32); + JSON_ASSERT(mant <= 1024); + switch (exp) + { + case 0: + return std::ldexp(mant, -24); + case 31: + return (mant == 0) + ? std::numeric_limits::infinity() + : std::numeric_limits::quiet_NaN(); + default: + return std::ldexp(mant + 1024, exp - 25); + } + }(); + return sax->number_float((half & 0x8000u) != 0 + ? 
static_cast(-val) + : static_cast(val), ""); + } + + case 0xFA: // Single-Precision Float (four-byte IEEE 754) + { + float number{}; + return get_number(input_format_t::cbor, number) && sax->number_float(static_cast(number), ""); + } + + case 0xFB: // Double-Precision Float (eight-byte IEEE 754) + { + double number{}; + return get_number(input_format_t::cbor, number) && sax->number_float(static_cast(number), ""); + } + + default: // anything else (0xFF is handled inside the other types) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); + } + } + } + + /*! + @brief reads a CBOR string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. + Additionally, CBOR's strings with indefinite lengths are supported. + + @param[out] result created string + + @return whether string creation completed + */ + bool get_cbor_string(string_t& result) + { + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "string"))) + { + return false; + } + + switch (current) + { + // UTF-8 string (0x00..0x17 bytes follow) + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + { + return get_string(input_format_t::cbor, static_cast(current) & 0x1Fu, result); + } + + case 0x78: // UTF-8 string (one-byte uint8_t for n follows) + { + std::uint8_t len{}; + return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); + } + + case 0x79: // UTF-8 string (two-byte uint16_t for n follow) + { + std::uint16_t len{}; + return 
get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); + } + + case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) + { + std::uint32_t len{}; + return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); + } + + case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) + { + std::uint64_t len{}; + return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); + } + + case 0x7F: // UTF-8 string (indefinite length) + { + while (get() != 0xFF) + { + string_t chunk; + if (!get_cbor_string(chunk)) + { + return false; + } + result.append(chunk); + } + return true; + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string"), BasicJsonType())); + } + } + } + + /*! + @brief reads a CBOR byte array + + This function first reads starting bytes to determine the expected + byte array length and then copies this number of bytes into the byte array. + Additionally, CBOR's byte arrays with indefinite lengths are supported. 
+ + @param[out] result created byte array + + @return whether byte array creation completed + */ + bool get_cbor_binary(binary_t& result) + { + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "binary"))) + { + return false; + } + + switch (current) + { + // Binary data (0x00..0x17 bytes follow) + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + { + return get_binary(input_format_t::cbor, static_cast(current) & 0x1Fu, result); + } + + case 0x58: // Binary data (one-byte uint8_t for n follows) + { + std::uint8_t len{}; + return get_number(input_format_t::cbor, len) && + get_binary(input_format_t::cbor, len, result); + } + + case 0x59: // Binary data (two-byte uint16_t for n follow) + { + std::uint16_t len{}; + return get_number(input_format_t::cbor, len) && + get_binary(input_format_t::cbor, len, result); + } + + case 0x5A: // Binary data (four-byte uint32_t for n follow) + { + std::uint32_t len{}; + return get_number(input_format_t::cbor, len) && + get_binary(input_format_t::cbor, len, result); + } + + case 0x5B: // Binary data (eight-byte uint64_t for n follow) + { + std::uint64_t len{}; + return get_number(input_format_t::cbor, len) && + get_binary(input_format_t::cbor, len, result); + } + + case 0x5F: // Binary data (indefinite length) + { + while (get() != 0xFF) + { + binary_t chunk; + if (!get_cbor_binary(chunk)) + { + return false; + } + result.insert(result.end(), chunk.begin(), chunk.end()); + } + return true; + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x40-0x5B) or indefinite binary array type (0x5F); last byte: 
0x" + last_token, "binary"), BasicJsonType())); + } + } + } + + /*! + @param[in] len the length of the array or std::size_t(-1) for an + array of indefinite size + @param[in] tag_handler how CBOR tags should be treated + @return whether array creation completed + */ + bool get_cbor_array(const std::size_t len, + const cbor_tag_handler_t tag_handler) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len))) + { + return false; + } + + if (len != std::size_t(-1)) + { + for (std::size_t i = 0; i < len; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) + { + return false; + } + } + } + else + { + while (get() != 0xFF) + { + if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(false, tag_handler))) + { + return false; + } + } + } + + return sax->end_array(); + } + + /*! + @param[in] len the length of the object or std::size_t(-1) for an + object of indefinite size + @param[in] tag_handler how CBOR tags should be treated + @return whether object creation completed + */ + bool get_cbor_object(const std::size_t len, + const cbor_tag_handler_t tag_handler) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len))) + { + return false; + } + + string_t key; + if (len != std::size_t(-1)) + { + for (std::size_t i = 0; i < len; ++i) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key))) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) + { + return false; + } + key.clear(); + } + } + else + { + while (get() != 0xFF) + { + if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key))) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) + { + return false; + } + key.clear(); + } + } + + return sax->end_object(); + } + + ///////////// + // MsgPack // + ///////////// + + /*! 
+ @return whether a valid MessagePack value was passed to the SAX parser + */ + bool parse_msgpack_internal() + { + switch (get()) + { + // EOF + case std::char_traits::eof(): + return unexpect_eof(input_format_t::msgpack, "value"); + + // positive fixint + case 0x00: + case 0x01: + case 0x02: + case 0x03: + case 0x04: + case 0x05: + case 0x06: + case 0x07: + case 0x08: + case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: + case 0x0D: + case 0x0E: + case 0x0F: + case 0x10: + case 0x11: + case 0x12: + case 0x13: + case 0x14: + case 0x15: + case 0x16: + case 0x17: + case 0x18: + case 0x19: + case 0x1A: + case 0x1B: + case 0x1C: + case 0x1D: + case 0x1E: + case 0x1F: + case 0x20: + case 0x21: + case 0x22: + case 0x23: + case 0x24: + case 0x25: + case 0x26: + case 0x27: + case 0x28: + case 0x29: + case 0x2A: + case 0x2B: + case 0x2C: + case 0x2D: + case 0x2E: + case 0x2F: + case 0x30: + case 0x31: + case 0x32: + case 0x33: + case 0x34: + case 0x35: + case 0x36: + case 0x37: + case 0x38: + case 0x39: + case 0x3A: + case 0x3B: + case 0x3C: + case 0x3D: + case 0x3E: + case 0x3F: + case 0x40: + case 0x41: + case 0x42: + case 0x43: + case 0x44: + case 0x45: + case 0x46: + case 0x47: + case 0x48: + case 0x49: + case 0x4A: + case 0x4B: + case 0x4C: + case 0x4D: + case 0x4E: + case 0x4F: + case 0x50: + case 0x51: + case 0x52: + case 0x53: + case 0x54: + case 0x55: + case 0x56: + case 0x57: + case 0x58: + case 0x59: + case 0x5A: + case 0x5B: + case 0x5C: + case 0x5D: + case 0x5E: + case 0x5F: + case 0x60: + case 0x61: + case 0x62: + case 0x63: + case 0x64: + case 0x65: + case 0x66: + case 0x67: + case 0x68: + case 0x69: + case 0x6A: + case 0x6B: + case 0x6C: + case 0x6D: + case 0x6E: + case 0x6F: + case 0x70: + case 0x71: + case 0x72: + case 0x73: + case 0x74: + case 0x75: + case 0x76: + case 0x77: + case 0x78: + case 0x79: + case 0x7A: + case 0x7B: + case 0x7C: + case 0x7D: + case 0x7E: + case 0x7F: + return sax->number_unsigned(static_cast(current)); + + // fixmap + case 0x80: 
+ case 0x81: + case 0x82: + case 0x83: + case 0x84: + case 0x85: + case 0x86: + case 0x87: + case 0x88: + case 0x89: + case 0x8A: + case 0x8B: + case 0x8C: + case 0x8D: + case 0x8E: + case 0x8F: + return get_msgpack_object(static_cast(static_cast(current) & 0x0Fu)); + + // fixarray + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: + case 0x98: + case 0x99: + case 0x9A: + case 0x9B: + case 0x9C: + case 0x9D: + case 0x9E: + case 0x9F: + return get_msgpack_array(static_cast(static_cast(current) & 0x0Fu)); + + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + case 0xD9: // str 8 + case 0xDA: // str 16 + case 0xDB: // str 32 + { + string_t s; + return get_msgpack_string(s) && sax->string(s); + } + + case 0xC0: // nil + return sax->null(); + + case 0xC2: // false + return sax->boolean(false); + + case 0xC3: // true + return sax->boolean(true); + + case 0xC4: // bin 8 + case 0xC5: // bin 16 + case 0xC6: // bin 32 + case 0xC7: // ext 8 + case 0xC8: // ext 16 + case 0xC9: // ext 32 + case 0xD4: // fixext 1 + case 0xD5: // fixext 2 + case 0xD6: // fixext 4 + case 0xD7: // fixext 8 + case 0xD8: // fixext 16 + { + binary_t b; + return get_msgpack_binary(b) && sax->binary(b); + } + + case 0xCA: // float 32 + { + float number{}; + return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast(number), ""); + } + + case 0xCB: // float 64 + { + double number{}; + return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast(number), ""); + } + + case 0xCC: // uint 8 + { + std::uint8_t number{}; + return 
get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); + } + + case 0xCD: // uint 16 + { + std::uint16_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); + } + + case 0xCE: // uint 32 + { + std::uint32_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); + } + + case 0xCF: // uint 64 + { + std::uint64_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); + } + + case 0xD0: // int 8 + { + std::int8_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_integer(number); + } + + case 0xD1: // int 16 + { + std::int16_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_integer(number); + } + + case 0xD2: // int 32 + { + std::int32_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_integer(number); + } + + case 0xD3: // int 64 + { + std::int64_t number{}; + return get_number(input_format_t::msgpack, number) && sax->number_integer(number); + } + + case 0xDC: // array 16 + { + std::uint16_t len{}; + return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast(len)); + } + + case 0xDD: // array 32 + { + std::uint32_t len{}; + return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast(len)); + } + + case 0xDE: // map 16 + { + std::uint16_t len{}; + return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast(len)); + } + + case 0xDF: // map 32 + { + std::uint32_t len{}; + return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast(len)); + } + + // negative fixint + case 0xE0: + case 0xE1: + case 0xE2: + case 0xE3: + case 0xE4: + case 0xE5: + case 0xE6: + case 0xE7: + case 0xE8: + case 0xE9: + case 0xEA: + case 0xEB: + case 0xEC: + case 0xED: + case 0xEE: + case 0xEF: + case 0xF0: + case 0xF1: + case 0xF2: + case 0xF3: + case 0xF4: + case 0xF5: + case 
0xF6: + case 0xF7: + case 0xF8: + case 0xF9: + case 0xFA: + case 0xFB: + case 0xFC: + case 0xFD: + case 0xFE: + case 0xFF: + return sax->number_integer(static_cast(current)); + + default: // anything else + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); + } + } + } + + /*! + @brief reads a MessagePack string + + This function first reads starting bytes to determine the expected + string length and then copies this number of bytes into a string. + + @param[out] result created string + + @return whether string creation completed + */ + bool get_msgpack_string(string_t& result) + { + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::msgpack, "string"))) + { + return false; + } + + switch (current) + { + // fixstr + case 0xA0: + case 0xA1: + case 0xA2: + case 0xA3: + case 0xA4: + case 0xA5: + case 0xA6: + case 0xA7: + case 0xA8: + case 0xA9: + case 0xAA: + case 0xAB: + case 0xAC: + case 0xAD: + case 0xAE: + case 0xAF: + case 0xB0: + case 0xB1: + case 0xB2: + case 0xB3: + case 0xB4: + case 0xB5: + case 0xB6: + case 0xB7: + case 0xB8: + case 0xB9: + case 0xBA: + case 0xBB: + case 0xBC: + case 0xBD: + case 0xBE: + case 0xBF: + { + return get_string(input_format_t::msgpack, static_cast(current) & 0x1Fu, result); + } + + case 0xD9: // str 8 + { + std::uint8_t len{}; + return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); + } + + case 0xDA: // str 16 + { + std::uint16_t len{}; + return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); + } + + case 0xDB: // str 32 + { + std::uint32_t len{}; + return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, 
last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string"), BasicJsonType())); + } + } + } + + /*! + @brief reads a MessagePack byte array + + This function first reads starting bytes to determine the expected + byte array length and then copies this number of bytes into a byte array. + + @param[out] result created byte array + + @return whether byte array creation completed + */ + bool get_msgpack_binary(binary_t& result) + { + // helper function to set the subtype + auto assign_and_return_true = [&result](std::int8_t subtype) + { + result.set_subtype(static_cast(subtype)); + return true; + }; + + switch (current) + { + case 0xC4: // bin 8 + { + std::uint8_t len{}; + return get_number(input_format_t::msgpack, len) && + get_binary(input_format_t::msgpack, len, result); + } + + case 0xC5: // bin 16 + { + std::uint16_t len{}; + return get_number(input_format_t::msgpack, len) && + get_binary(input_format_t::msgpack, len, result); + } + + case 0xC6: // bin 32 + { + std::uint32_t len{}; + return get_number(input_format_t::msgpack, len) && + get_binary(input_format_t::msgpack, len, result); + } + + case 0xC7: // ext 8 + { + std::uint8_t len{}; + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, len) && + get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, len, result) && + assign_and_return_true(subtype); + } + + case 0xC8: // ext 16 + { + std::uint16_t len{}; + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, len) && + get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, len, result) && + assign_and_return_true(subtype); + } + + case 0xC9: // ext 32 + { + std::uint32_t len{}; + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, len) && + get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, len, 
result) && + assign_and_return_true(subtype); + } + + case 0xD4: // fixext 1 + { + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, 1, result) && + assign_and_return_true(subtype); + } + + case 0xD5: // fixext 2 + { + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, 2, result) && + assign_and_return_true(subtype); + } + + case 0xD6: // fixext 4 + { + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, 4, result) && + assign_and_return_true(subtype); + } + + case 0xD7: // fixext 8 + { + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, 8, result) && + assign_and_return_true(subtype); + } + + case 0xD8: // fixext 16 + { + std::int8_t subtype{}; + return get_number(input_format_t::msgpack, subtype) && + get_binary(input_format_t::msgpack, 16, result) && + assign_and_return_true(subtype); + } + + default: // LCOV_EXCL_LINE + return false; // LCOV_EXCL_LINE + } + } + + /*! + @param[in] len the length of the array + @return whether array creation completed + */ + bool get_msgpack_array(const std::size_t len) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len))) + { + return false; + } + + for (std::size_t i = 0; i < len; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal())) + { + return false; + } + } + + return sax->end_array(); + } + + /*! 
+ @param[in] len the length of the object + @return whether object creation completed + */ + bool get_msgpack_object(const std::size_t len) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len))) + { + return false; + } + + string_t key; + for (std::size_t i = 0; i < len; ++i) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!get_msgpack_string(key) || !sax->key(key))) + { + return false; + } + + if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal())) + { + return false; + } + key.clear(); + } + + return sax->end_object(); + } + + //////////// + // UBJSON // + //////////// + + /*! + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + + @return whether a valid UBJSON value was passed to the SAX parser + */ + bool parse_ubjson_internal(const bool get_char = true) + { + return get_ubjson_value(get_char ? get_ignore_noop() : current); + } + + /*! + @brief reads a UBJSON string + + This function is either called after reading the 'S' byte explicitly + indicating a string, or in case of an object key where the 'S' byte can be + left out. + + @param[out] result created string + @param[in] get_char whether a new character should be retrieved from the + input (true, default) or whether the last read + character should be considered instead + + @return whether string creation completed + */ + bool get_ubjson_string(string_t& result, const bool get_char = true) + { + if (get_char) + { + get(); // TODO(niels): may we ignore N here? 
+ } + + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value"))) + { + return false; + } + + switch (current) + { + case 'U': + { + std::uint8_t len{}; + return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); + } + + case 'i': + { + std::int8_t len{}; + return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); + } + + case 'I': + { + std::int16_t len{}; + return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); + } + + case 'l': + { + std::int32_t len{}; + return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); + } + + case 'L': + { + std::int64_t len{}; + return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); + } + + default: + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string"), BasicJsonType())); + } + } + + /*! 
+ @param[out] result determined size + @return whether size determination completed + */ + bool get_ubjson_size_value(std::size_t& result) + { + switch (get_ignore_noop()) + { + case 'U': + { + std::uint8_t number{}; + if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'i': + { + std::int8_t number{}; + if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'I': + { + std::int16_t number{}; + if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'l': + { + std::int32_t number{}; + if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + case 'L': + { + std::int64_t number{}; + if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) + { + return false; + } + result = static_cast(number); + return true; + } + + default: + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size"), BasicJsonType())); + } + } + } + + /*! + @brief determine the type and size for a container + + In the optimized UBJSON format, a type and a size can be provided to allow + for a more compact representation. 
+ + @param[out] result pair of the size and the type + + @return whether pair creation completed + */ + bool get_ubjson_size_type(std::pair& result) + { + result.first = string_t::npos; // size + result.second = 0; // type + + get_ignore_noop(); + + if (current == '$') + { + result.second = get(); // must not ignore 'N', because 'N' maybe the type + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "type"))) + { + return false; + } + + get_ignore_noop(); + if (JSON_HEDLEY_UNLIKELY(current != '#')) + { + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value"))) + { + return false; + } + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size"), BasicJsonType())); + } + + return get_ubjson_size_value(result.first); + } + + if (current == '#') + { + return get_ubjson_size_value(result.first); + } + + return true; + } + + /*! 
+ @param prefix the previously read or set type prefix + @return whether value creation completed + */ + bool get_ubjson_value(const char_int_type prefix) + { + switch (prefix) + { + case std::char_traits::eof(): // EOF + return unexpect_eof(input_format_t::ubjson, "value"); + + case 'T': // true + return sax->boolean(true); + case 'F': // false + return sax->boolean(false); + + case 'Z': // null + return sax->null(); + + case 'U': + { + std::uint8_t number{}; + return get_number(input_format_t::ubjson, number) && sax->number_unsigned(number); + } + + case 'i': + { + std::int8_t number{}; + return get_number(input_format_t::ubjson, number) && sax->number_integer(number); + } + + case 'I': + { + std::int16_t number{}; + return get_number(input_format_t::ubjson, number) && sax->number_integer(number); + } + + case 'l': + { + std::int32_t number{}; + return get_number(input_format_t::ubjson, number) && sax->number_integer(number); + } + + case 'L': + { + std::int64_t number{}; + return get_number(input_format_t::ubjson, number) && sax->number_integer(number); + } + + case 'd': + { + float number{}; + return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast(number), ""); + } + + case 'D': + { + double number{}; + return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast(number), ""); + } + + case 'H': + { + return get_ubjson_high_precision_number(); + } + + case 'C': // char + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "char"))) + { + return false; + } + if (JSON_HEDLEY_UNLIKELY(current > 127)) + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char"), BasicJsonType())); + } + string_t s(1, static_cast(current)); + return sax->string(s); + } + + case 'S': // string + { + string_t s; + return 
get_ubjson_string(s) && sax->string(s); + } + + case '[': // array + return get_ubjson_array(); + + case '{': // object + return get_ubjson_object(); + + default: // anything else + { + auto last_token = get_token_string(); + return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); + } + } + } + + /*! + @return whether array creation completed + */ + bool get_ubjson_array() + { + std::pair size_and_type; + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type))) + { + return false; + } + + if (size_and_type.first != string_t::npos) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first))) + { + return false; + } + + if (size_and_type.second != 0) + { + if (size_and_type.second != 'N') + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second))) + { + return false; + } + } + } + } + else + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) + { + return false; + } + } + } + } + else + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) + { + return false; + } + + while (current != ']') + { + if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal(false))) + { + return false; + } + get_ignore_noop(); + } + } + + return sax->end_array(); + } + + /*! 
+ @return whether object creation completed + */ + bool get_ubjson_object() + { + std::pair size_and_type; + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type))) + { + return false; + } + + string_t key; + if (size_and_type.first != string_t::npos) + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(size_and_type.first))) + { + return false; + } + + if (size_and_type.second != 0) + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key))) + { + return false; + } + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second))) + { + return false; + } + key.clear(); + } + } + else + { + for (std::size_t i = 0; i < size_and_type.first; ++i) + { + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key))) + { + return false; + } + if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) + { + return false; + } + key.clear(); + } + } + } + else + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) + { + return false; + } + + while (current != '}') + { + if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key, false) || !sax->key(key))) + { + return false; + } + if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) + { + return false; + } + get_ignore_noop(); + key.clear(); + } + } + + return sax->end_object(); + } + + // Note, no reader for UBJSON binary types is implemented because they do + // not exist + + bool get_ubjson_high_precision_number() + { + // get size of following number string + std::size_t size{}; + auto res = get_ubjson_size_value(size); + if (JSON_HEDLEY_UNLIKELY(!res)) + { + return res; + } + + // get number string + std::vector number_vector; + for (std::size_t i = 0; i < size; ++i) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "number"))) + { + return false; + } + number_vector.push_back(static_cast(current)); + } + + // parse number string + auto number_ia = detail::input_adapter(std::forward(number_vector)); + auto 
number_lexer = detail::lexer(std::move(number_ia), false); + const auto result_number = number_lexer.scan(); + const auto number_string = number_lexer.get_token_string(); + const auto result_remainder = number_lexer.scan(); + + using token_type = typename detail::lexer_base::token_type; + + if (JSON_HEDLEY_UNLIKELY(result_remainder != token_type::end_of_input)) + { + return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number"), BasicJsonType())); + } + + switch (result_number) + { + case token_type::value_integer: + return sax->number_integer(number_lexer.get_number_integer()); + case token_type::value_unsigned: + return sax->number_unsigned(number_lexer.get_number_unsigned()); + case token_type::value_float: + return sax->number_float(number_lexer.get_number_float(), std::move(number_string)); + default: + return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number"), BasicJsonType())); + } + } + + /////////////////////// + // Utility functions // + /////////////////////// + + /*! + @brief get next character from the input + + This function provides the interface to the used input adapter. It does + not throw in case the input reached EOF, but returns a -'ve valued + `std::char_traits::eof()` in that case. + + @return character read from the input + */ + char_int_type get() + { + ++chars_read; + return current = ia.get_character(); + } + + /*! 
+ @return character read from the input after ignoring all 'N' entries + */ + char_int_type get_ignore_noop() + { + do + { + get(); + } + while (current == 'N'); + + return current; + } + + /* + @brief read a number from the input + + @tparam NumberType the type of the number + @param[in] format the current format (for diagnostics) + @param[out] result number of type @a NumberType + + @return whether conversion completed + + @note This function needs to respect the system's endianess, because + bytes in CBOR, MessagePack, and UBJSON are stored in network order + (big endian) and therefore need reordering on little endian systems. + */ + template + bool get_number(const input_format_t format, NumberType& result) + { + // step 1: read input into array with system's byte order + std::array vec; + for (std::size_t i = 0; i < sizeof(NumberType); ++i) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number"))) + { + return false; + } + + // reverse byte order prior to conversion if necessary + if (is_little_endian != InputIsLittleEndian) + { + vec[sizeof(NumberType) - i - 1] = static_cast(current); + } + else + { + vec[i] = static_cast(current); // LCOV_EXCL_LINE + } + } + + // step 2: convert array into number of type T and return + std::memcpy(&result, vec.data(), sizeof(NumberType)); + return true; + } + + /*! + @brief create a string by reading characters from the input + + @tparam NumberType the type of the number + @param[in] format the current format (for diagnostics) + @param[in] len number of characters to read + @param[out] result string created by reading @a len bytes + + @return whether string creation completed + + @note We can not reserve @a len bytes for the result, because @a len + may be too large. Usually, @ref unexpect_eof() detects the end of + the input before we run out of string memory. 
+ */ + template + bool get_string(const input_format_t format, + const NumberType len, + string_t& result) + { + bool success = true; + for (NumberType i = 0; i < len; i++) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "string"))) + { + success = false; + break; + } + result.push_back(static_cast(current)); + } + return success; + } + + /*! + @brief create a byte array by reading bytes from the input + + @tparam NumberType the type of the number + @param[in] format the current format (for diagnostics) + @param[in] len number of bytes to read + @param[out] result byte array created by reading @a len bytes + + @return whether byte array creation completed + + @note We can not reserve @a len bytes for the result, because @a len + may be too large. Usually, @ref unexpect_eof() detects the end of + the input before we run out of memory. + */ + template + bool get_binary(const input_format_t format, + const NumberType len, + binary_t& result) + { + bool success = true; + for (NumberType i = 0; i < len; i++) + { + get(); + if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "binary"))) + { + success = false; + break; + } + result.push_back(static_cast(current)); + } + return success; + } + + /*! + @param[in] format the current format (for diagnostics) + @param[in] context further context information (for diagnostics) + @return whether the last read character is not EOF + */ + JSON_HEDLEY_NON_NULL(3) + bool unexpect_eof(const input_format_t format, const char* context) const + { + if (JSON_HEDLEY_UNLIKELY(current == std::char_traits::eof())) + { + return sax->parse_error(chars_read, "", + parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context), BasicJsonType())); + } + return true; + } + + /*! 
+ @return a string representation of the last read byte + */ + std::string get_token_string() const + { + std::array cr{{}}; + (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); + return std::string{cr.data()}; + } + + /*! + @param[in] format the current format + @param[in] detail a detailed error message + @param[in] context further context information + @return a message string to use in the parse_error exceptions + */ + std::string exception_message(const input_format_t format, + const std::string& detail, + const std::string& context) const + { + std::string error_msg = "syntax error while parsing "; + + switch (format) + { + case input_format_t::cbor: + error_msg += "CBOR"; + break; + + case input_format_t::msgpack: + error_msg += "MessagePack"; + break; + + case input_format_t::ubjson: + error_msg += "UBJSON"; + break; + + case input_format_t::bson: + error_msg += "BSON"; + break; + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + + return error_msg + " " + context + ": " + detail; + } + + private: + /// input adapter + InputAdapterType ia; + + /// the current character + char_int_type current = std::char_traits::eof(); + + /// the number of characters read + std::size_t chars_read = 0; + + /// whether we can assume little endianess + const bool is_little_endian = little_endianess(); + + /// the SAX parser + json_sax_t* sax = nullptr; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + + +#include // isfinite +#include // uint8_t +#include // function +#include // string +#include // move +#include // vector + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +//////////// +// parser // +//////////// + +enum class parse_event_t : uint8_t +{ + /// the parser read `{` and started to process a JSON object + object_start, + /// the parser read `}` and finished processing a 
JSON object + object_end, + /// the parser read `[` and started to process a JSON array + array_start, + /// the parser read `]` and finished processing a JSON array + array_end, + /// the parser read a key of a value in an object + key, + /// the parser finished reading a JSON value + value +}; + +template +using parser_callback_t = + std::function; + +/*! +@brief syntax analysis + +This class implements a recursive descent parser. +*/ +template +class parser +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using lexer_t = lexer; + using token_type = typename lexer_t::token_type; + + public: + /// a parser reading from an input adapter + explicit parser(InputAdapterType&& adapter, + const parser_callback_t cb = nullptr, + const bool allow_exceptions_ = true, + const bool skip_comments = false) + : callback(cb) + , m_lexer(std::move(adapter), skip_comments) + , allow_exceptions(allow_exceptions_) + { + // read first token + get_token(); + } + + /*! 
+ @brief public parser interface + + @param[in] strict whether to expect the last token to be EOF + @param[in,out] result parsed JSON value + + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + */ + void parse(const bool strict, BasicJsonType& result) + { + if (callback) + { + json_sax_dom_callback_parser sdp(result, callback, allow_exceptions); + sax_parse_internal(&sdp); + + // in strict mode, input must be completely read + if (strict && (get_token() != token_type::end_of_input)) + { + sdp.parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), + exception_message(token_type::end_of_input, "value"), BasicJsonType())); + } + + // in case of an error, return discarded value + if (sdp.is_errored()) + { + result = value_t::discarded; + return; + } + + // set top-level value to null if it was discarded by the callback + // function + if (result.is_discarded()) + { + result = nullptr; + } + } + else + { + json_sax_dom_parser sdp(result, allow_exceptions); + sax_parse_internal(&sdp); + + // in strict mode, input must be completely read + if (strict && (get_token() != token_type::end_of_input)) + { + sdp.parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), BasicJsonType())); + } + + // in case of an error, return discarded value + if (sdp.is_errored()) + { + result = value_t::discarded; + return; + } + } + + result.assert_invariant(); + } + + /*! 
+ @brief public accept interface + + @param[in] strict whether to expect the last token to be EOF + @return whether the input is a proper JSON text + */ + bool accept(const bool strict = true) + { + json_sax_acceptor sax_acceptor; + return sax_parse(&sax_acceptor, strict); + } + + template + JSON_HEDLEY_NON_NULL(2) + bool sax_parse(SAX* sax, const bool strict = true) + { + (void)detail::is_sax_static_asserts {}; + const bool result = sax_parse_internal(sax); + + // strict mode: next byte must be EOF + if (result && strict && (get_token() != token_type::end_of_input)) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), BasicJsonType())); + } + + return result; + } + + private: + template + JSON_HEDLEY_NON_NULL(2) + bool sax_parse_internal(SAX* sax) + { + // stack to remember the hierarchy of structured values we are parsing + // true = array; false = object + std::vector states; + // value to avoid a goto (see comment where set to true) + bool skip_to_state_evaluation = false; + + while (true) + { + if (!skip_to_state_evaluation) + { + // invariant: get_token() was called before each iteration + switch (last_token) + { + case token_type::begin_object: + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) + { + return false; + } + + // closing } -> we are done + if (get_token() == token_type::end_object) + { + if (JSON_HEDLEY_UNLIKELY(!sax->end_object())) + { + return false; + } + break; + } + + // parse key + if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string)) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), BasicJsonType())); + } + if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string()))) + { + return false; + } + + // parse separator (:) + if 
(JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator)) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), BasicJsonType())); + } + + // remember we are now inside an object + states.push_back(false); + + // parse values + get_token(); + continue; + } + + case token_type::begin_array: + { + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) + { + return false; + } + + // closing ] -> we are done + if (get_token() == token_type::end_array) + { + if (JSON_HEDLEY_UNLIKELY(!sax->end_array())) + { + return false; + } + break; + } + + // remember we are now inside an array + states.push_back(true); + + // parse values (no need to call get_token) + continue; + } + + case token_type::value_float: + { + const auto res = m_lexer.get_number_float(); + + if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res))) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'", BasicJsonType())); + } + + if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string()))) + { + return false; + } + + break; + } + + case token_type::literal_false: + { + if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false))) + { + return false; + } + break; + } + + case token_type::literal_null: + { + if (JSON_HEDLEY_UNLIKELY(!sax->null())) + { + return false; + } + break; + } + + case token_type::literal_true: + { + if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true))) + { + return false; + } + break; + } + + case token_type::value_integer: + { + if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer()))) + { + return false; + } + break; + } + + case token_type::value_string: + { + if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string()))) + { + return false; + } + break; + } + + case token_type::value_unsigned: + { + if 
(JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned()))) + { + return false; + } + break; + } + + case token_type::parse_error: + { + // using "uninitialized" to avoid "expected" message + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::uninitialized, "value"), BasicJsonType())); + } + + default: // the last token was unexpected + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), BasicJsonType())); + } + } + } + else + { + skip_to_state_evaluation = false; + } + + // we reached this line after we successfully parsed a value + if (states.empty()) + { + // empty stack: we reached the end of the hierarchy: done + return true; + } + + if (states.back()) // array + { + // comma -> next value + if (get_token() == token_type::value_separator) + { + // parse a new value + get_token(); + continue; + } + + // closing ] + if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array)) + { + if (JSON_HEDLEY_UNLIKELY(!sax->end_array())) + { + return false; + } + + // We are done with this array. Before we can parse a + // new value, we need to evaluate the new state first. + // By setting skip_to_state_evaluation to false, we + // are effectively jumping to the beginning of this if. 
+ JSON_ASSERT(!states.empty()); + states.pop_back(); + skip_to_state_evaluation = true; + continue; + } + + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_array, "array"), BasicJsonType())); + } + + // states.back() is false -> object + + // comma -> next value + if (get_token() == token_type::value_separator) + { + // parse key + if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string)) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), BasicJsonType())); + } + + if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string()))) + { + return false; + } + + // parse separator (:) + if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator)) + { + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), BasicJsonType())); + } + + // parse values + get_token(); + continue; + } + + // closing } + if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object)) + { + if (JSON_HEDLEY_UNLIKELY(!sax->end_object())) + { + return false; + } + + // We are done with this object. Before we can parse a + // new value, we need to evaluate the new state first. + // By setting skip_to_state_evaluation to false, we + // are effectively jumping to the beginning of this if. 
+ JSON_ASSERT(!states.empty()); + states.pop_back(); + skip_to_state_evaluation = true; + continue; + } + + return sax->parse_error(m_lexer.get_position(), + m_lexer.get_token_string(), + parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_object, "object"), BasicJsonType())); + } + } + + /// get next token from lexer + token_type get_token() + { + return last_token = m_lexer.scan(); + } + + std::string exception_message(const token_type expected, const std::string& context) + { + std::string error_msg = "syntax error "; + + if (!context.empty()) + { + error_msg += "while parsing " + context + " "; + } + + error_msg += "- "; + + if (last_token == token_type::parse_error) + { + error_msg += std::string(m_lexer.get_error_message()) + "; last read: '" + + m_lexer.get_token_string() + "'"; + } + else + { + error_msg += "unexpected " + std::string(lexer_t::token_type_name(last_token)); + } + + if (expected != token_type::uninitialized) + { + error_msg += "; expected " + std::string(lexer_t::token_type_name(expected)); + } + + return error_msg; + } + + private: + /// callback function + const parser_callback_t callback = nullptr; + /// the type of the last read token + token_type last_token = token_type::uninitialized; + /// the lexer + lexer_t m_lexer; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; +}; +} // namespace detail +} // namespace nlohmann + +// #include + + +// #include + + +#include // ptrdiff_t +#include // numeric_limits + +// #include + + +namespace nlohmann +{ +namespace detail +{ +/* +@brief an iterator for primitive JSON types + +This class models an iterator for primitive JSON types (boolean, number, +string). It's only purpose is to allow the iterator/const_iterator classes +to "iterate" over primitive values. Internally, the iterator is modeled by +a `difference_type` variable. Value begin_value (`0`) models the begin, +end_value (`1`) models past the end. 
+*/ +class primitive_iterator_t +{ + private: + using difference_type = std::ptrdiff_t; + static constexpr difference_type begin_value = 0; + static constexpr difference_type end_value = begin_value + 1; + + JSON_PRIVATE_UNLESS_TESTED: + /// iterator as signed integer type + difference_type m_it = (std::numeric_limits::min)(); + + public: + constexpr difference_type get_value() const noexcept + { + return m_it; + } + + /// set iterator to a defined beginning + void set_begin() noexcept + { + m_it = begin_value; + } + + /// set iterator to a defined past the end + void set_end() noexcept + { + m_it = end_value; + } + + /// return whether the iterator can be dereferenced + constexpr bool is_begin() const noexcept + { + return m_it == begin_value; + } + + /// return whether the iterator is at end + constexpr bool is_end() const noexcept + { + return m_it == end_value; + } + + friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it == rhs.m_it; + } + + friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it < rhs.m_it; + } + + primitive_iterator_t operator+(difference_type n) noexcept + { + auto result = *this; + result += n; + return result; + } + + friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept + { + return lhs.m_it - rhs.m_it; + } + + primitive_iterator_t& operator++() noexcept + { + ++m_it; + return *this; + } + + primitive_iterator_t const operator++(int) noexcept + { + auto result = *this; + ++m_it; + return result; + } + + primitive_iterator_t& operator--() noexcept + { + --m_it; + return *this; + } + + primitive_iterator_t const operator--(int) noexcept + { + auto result = *this; + --m_it; + return result; + } + + primitive_iterator_t& operator+=(difference_type n) noexcept + { + m_it += n; + return *this; + } + + primitive_iterator_t& operator-=(difference_type n) noexcept + { + m_it -= 
n; + return *this; + } +}; +} // namespace detail +} // namespace nlohmann + + +namespace nlohmann +{ +namespace detail +{ +/*! +@brief an iterator value + +@note This structure could easily be a union, but MSVC currently does not allow +unions members with complex constructors, see https://github.com/nlohmann/json/pull/105. +*/ +template struct internal_iterator +{ + /// iterator for JSON objects + typename BasicJsonType::object_t::iterator object_iterator {}; + /// iterator for JSON arrays + typename BasicJsonType::array_t::iterator array_iterator {}; + /// generic iterator for all other types + primitive_iterator_t primitive_iterator {}; +}; +} // namespace detail +} // namespace nlohmann + +// #include + + +#include // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next +#include // conditional, is_const, remove_const + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +// forward declare, to be able to friend it later on +template class iteration_proxy; +template class iteration_proxy_value; + +/*! +@brief a template for a bidirectional iterator for the @ref basic_json class +This class implements a both iterators (iterator and const_iterator) for the +@ref basic_json class. +@note An iterator is called *initialized* when a pointer to a JSON value has + been set (e.g., by a constructor or a copy assignment). If the iterator is + default-constructed, it is *uninitialized* and most methods are undefined. + **The library uses assertions to detect calls on uninitialized iterators.** +@requirement The class satisfies the following concept requirements: +- +[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator): + The iterator that can be moved can be moved in both directions (i.e. + incremented and decremented). 
+@since version 1.0.0, simplified in version 2.0.9, change to bidirectional + iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593) +*/ +template +class iter_impl +{ + /// the iterator with BasicJsonType of different const-ness + using other_iter_impl = iter_impl::value, typename std::remove_const::type, const BasicJsonType>::type>; + /// allow basic_json to access private members + friend other_iter_impl; + friend BasicJsonType; + friend iteration_proxy; + friend iteration_proxy_value; + + using object_t = typename BasicJsonType::object_t; + using array_t = typename BasicJsonType::array_t; + // make sure BasicJsonType is basic_json or const basic_json + static_assert(is_basic_json::type>::value, + "iter_impl only accepts (const) basic_json"); + + public: + + /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17. + /// The C++ Standard has never required user-defined iterators to derive from std::iterator. + /// A user-defined iterator should provide publicly accessible typedefs named + /// iterator_category, value_type, difference_type, pointer, and reference. + /// Note that value_type is required to be non-const, even for constant iterators. 
+ using iterator_category = std::bidirectional_iterator_tag; + + /// the type of the values when the iterator is dereferenced + using value_type = typename BasicJsonType::value_type; + /// a type to represent differences between iterators + using difference_type = typename BasicJsonType::difference_type; + /// defines a pointer to the type iterated over (value_type) + using pointer = typename std::conditional::value, + typename BasicJsonType::const_pointer, + typename BasicJsonType::pointer>::type; + /// defines a reference to the type iterated over (value_type) + using reference = + typename std::conditional::value, + typename BasicJsonType::const_reference, + typename BasicJsonType::reference>::type; + + /// default constructor + iter_impl() = default; + + /*! + @brief constructor for a given JSON instance + @param[in] object pointer to a JSON object for this iterator + @pre object != nullptr + @post The iterator is initialized; i.e. `m_object != nullptr`. + */ + explicit iter_impl(pointer object) noexcept : m_object(object) + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = typename object_t::iterator(); + break; + } + + case value_t::array: + { + m_it.array_iterator = typename array_t::iterator(); + break; + } + + default: + { + m_it.primitive_iterator = primitive_iterator_t(); + break; + } + } + } + + /*! + @note The conventional copy constructor and copy assignment are implicitly + defined. Combined with the following converting constructor and + assignment, they support: (1) copy from iterator to iterator, (2) + copy from const iterator to const iterator, and (3) conversion from + iterator to const iterator. However conversion from const iterator + to iterator is not defined. + */ + + /*! 
+ @brief const copy constructor + @param[in] other const iterator to copy from + @note This copy constructor had to be defined explicitly to circumvent a bug + occurring on msvc v19.0 compiler (VS 2015) debug build. For more + information refer to: https://github.com/nlohmann/json/issues/1608 + */ + iter_impl(const iter_impl& other) noexcept + : m_object(other.m_object), m_it(other.m_it) + {} + + /*! + @brief converting assignment + @param[in] other const iterator to copy from + @return const/non-const iterator + @note It is not checked whether @a other is initialized. + */ + iter_impl& operator=(const iter_impl& other) noexcept + { + m_object = other.m_object; + m_it = other.m_it; + return *this; + } + + /*! + @brief converting constructor + @param[in] other non-const iterator to copy from + @note It is not checked whether @a other is initialized. + */ + iter_impl(const iter_impl::type>& other) noexcept + : m_object(other.m_object), m_it(other.m_it) + {} + + /*! + @brief converting assignment + @param[in] other non-const iterator to copy from + @return const/non-const iterator + @note It is not checked whether @a other is initialized. + */ + iter_impl& operator=(const iter_impl::type>& other) noexcept + { + m_object = other.m_object; + m_it = other.m_it; + return *this; + } + + JSON_PRIVATE_UNLESS_TESTED: + /*! + @brief set the iterator to the first value + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + void set_begin() noexcept + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = m_object->m_value.object->begin(); + break; + } + + case value_t::array: + { + m_it.array_iterator = m_object->m_value.array->begin(); + break; + } + + case value_t::null: + { + // set to end so begin()==end() is true: null is empty + m_it.primitive_iterator.set_end(); + break; + } + + default: + { + m_it.primitive_iterator.set_begin(); + break; + } + } + } + + /*! 
+ @brief set the iterator past the last value + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + void set_end() noexcept + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + m_it.object_iterator = m_object->m_value.object->end(); + break; + } + + case value_t::array: + { + m_it.array_iterator = m_object->m_value.array->end(); + break; + } + + default: + { + m_it.primitive_iterator.set_end(); + break; + } + } + } + + public: + /*! + @brief return a reference to the value pointed to by the iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + reference operator*() const + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end()); + return m_it.object_iterator->second; + } + + case value_t::array: + { + JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end()); + return *m_it.array_iterator; + } + + case value_t::null: + JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); + + default: + { + if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin())) + { + return *m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); + } + } + } + + /*! + @brief dereference the iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
+ */ + pointer operator->() const + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end()); + return &(m_it.object_iterator->second); + } + + case value_t::array: + { + JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end()); + return &*m_it.array_iterator; + } + + default: + { + if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin())) + { + return m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); + } + } + } + + /*! + @brief post-increment (it++) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl const operator++(int) + { + auto result = *this; + ++(*this); + return result; + } + + /*! + @brief pre-increment (++it) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator++() + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + std::advance(m_it.object_iterator, 1); + break; + } + + case value_t::array: + { + std::advance(m_it.array_iterator, 1); + break; + } + + default: + { + ++m_it.primitive_iterator; + break; + } + } + + return *this; + } + + /*! + @brief post-decrement (it--) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl const operator--(int) + { + auto result = *this; + --(*this); + return result; + } + + /*! + @brief pre-decrement (--it) + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator--() + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + { + std::advance(m_it.object_iterator, -1); + break; + } + + case value_t::array: + { + std::advance(m_it.array_iterator, -1); + break; + } + + default: + { + --m_it.primitive_iterator; + break; + } + } + + return *this; + } + + /*! + @brief comparison: equal + @pre The iterator is initialized; i.e. 
`m_object != nullptr`. + */ + template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > + bool operator==(const IterImpl& other) const + { + // if objects are not the same, the comparison is undefined + if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object)) + { + JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", *m_object)); + } + + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + return (m_it.object_iterator == other.m_it.object_iterator); + + case value_t::array: + return (m_it.array_iterator == other.m_it.array_iterator); + + default: + return (m_it.primitive_iterator == other.m_it.primitive_iterator); + } + } + + /*! + @brief comparison: not equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > + bool operator!=(const IterImpl& other) const + { + return !operator==(other); + } + + /*! + @brief comparison: smaller + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator<(const iter_impl& other) const + { + // if objects are not the same, the comparison is undefined + if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object)) + { + JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", *m_object)); + } + + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators", *m_object)); + + case value_t::array: + return (m_it.array_iterator < other.m_it.array_iterator); + + default: + return (m_it.primitive_iterator < other.m_it.primitive_iterator); + } + } + + /*! + @brief comparison: less than or equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
+ */ + bool operator<=(const iter_impl& other) const + { + return !other.operator < (*this); + } + + /*! + @brief comparison: greater than + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator>(const iter_impl& other) const + { + return !operator<=(other); + } + + /*! + @brief comparison: greater than or equal + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + bool operator>=(const iter_impl& other) const + { + return !operator<(other); + } + + /*! + @brief add to iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator+=(difference_type i) + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", *m_object)); + + case value_t::array: + { + std::advance(m_it.array_iterator, i); + break; + } + + default: + { + m_it.primitive_iterator += i; + break; + } + } + + return *this; + } + + /*! + @brief subtract from iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl& operator-=(difference_type i) + { + return operator+=(-i); + } + + /*! + @brief add to iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl operator+(difference_type i) const + { + auto result = *this; + result += i; + return result; + } + + /*! + @brief addition of distance and iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + friend iter_impl operator+(difference_type i, const iter_impl& it) + { + auto result = it; + result += i; + return result; + } + + /*! + @brief subtract from iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + iter_impl operator-(difference_type i) const + { + auto result = *this; + result -= i; + return result; + } + + /*! + @brief return difference + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
+ */ + difference_type operator-(const iter_impl& other) const + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", *m_object)); + + case value_t::array: + return m_it.array_iterator - other.m_it.array_iterator; + + default: + return m_it.primitive_iterator - other.m_it.primitive_iterator; + } + } + + /*! + @brief access to successor + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + reference operator[](difference_type n) const + { + JSON_ASSERT(m_object != nullptr); + + switch (m_object->m_type) + { + case value_t::object: + JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators", *m_object)); + + case value_t::array: + return *std::next(m_it.array_iterator, n); + + case value_t::null: + JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); + + default: + { + if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n)) + { + return *m_object; + } + + JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); + } + } + } + + /*! + @brief return the key of an object iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. + */ + const typename object_t::key_type& key() const + { + JSON_ASSERT(m_object != nullptr); + + if (JSON_HEDLEY_LIKELY(m_object->is_object())) + { + return m_it.object_iterator->first; + } + + JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators", *m_object)); + } + + /*! + @brief return the value of an iterator + @pre The iterator is initialized; i.e. `m_object != nullptr`. 
+ */ + reference value() const + { + return operator*(); + } + + JSON_PRIVATE_UNLESS_TESTED: + /// associated JSON instance + pointer m_object = nullptr; + /// the actual iterator of the associated instance + internal_iterator::type> m_it {}; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // ptrdiff_t +#include // reverse_iterator +#include // declval + +namespace nlohmann +{ +namespace detail +{ +////////////////////// +// reverse_iterator // +////////////////////// + +/*! +@brief a template for a reverse iterator class + +@tparam Base the base iterator type to reverse. Valid types are @ref +iterator (to create @ref reverse_iterator) and @ref const_iterator (to +create @ref const_reverse_iterator). + +@requirement The class satisfies the following concept requirements: +- +[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator): + The iterator that can be moved can be moved in both directions (i.e. + incremented and decremented). +- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator): + It is possible to write to the pointed-to element (only if @a Base is + @ref iterator). 
+ +@since version 1.0.0 +*/ +template +class json_reverse_iterator : public std::reverse_iterator +{ + public: + using difference_type = std::ptrdiff_t; + /// shortcut to the reverse iterator adapter + using base_iterator = std::reverse_iterator; + /// the reference type for the pointed-to element + using reference = typename Base::reference; + + /// create reverse iterator from iterator + explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept + : base_iterator(it) {} + + /// create reverse iterator from base class + explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} + + /// post-increment (it++) + json_reverse_iterator const operator++(int) + { + return static_cast(base_iterator::operator++(1)); + } + + /// pre-increment (++it) + json_reverse_iterator& operator++() + { + return static_cast(base_iterator::operator++()); + } + + /// post-decrement (it--) + json_reverse_iterator const operator--(int) + { + return static_cast(base_iterator::operator--(1)); + } + + /// pre-decrement (--it) + json_reverse_iterator& operator--() + { + return static_cast(base_iterator::operator--()); + } + + /// add to iterator + json_reverse_iterator& operator+=(difference_type i) + { + return static_cast(base_iterator::operator+=(i)); + } + + /// add to iterator + json_reverse_iterator operator+(difference_type i) const + { + return static_cast(base_iterator::operator+(i)); + } + + /// subtract from iterator + json_reverse_iterator operator-(difference_type i) const + { + return static_cast(base_iterator::operator-(i)); + } + + /// return difference + difference_type operator-(const json_reverse_iterator& other) const + { + return base_iterator(*this) - base_iterator(other); + } + + /// access to successor + reference operator[](difference_type n) const + { + return *(this->operator+(n)); + } + + /// return the key of an object iterator + auto key() const -> decltype(std::declval().key()) + { + auto it = 
--this->base(); + return it.key(); + } + + /// return the value of an iterator + reference value() const + { + auto it = --this->base(); + return it.operator * (); + } +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // all_of +#include // isdigit +#include // max +#include // accumulate +#include // string +#include // move +#include // vector + +// #include + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +template +class json_pointer +{ + // allow basic_json to access private members + NLOHMANN_BASIC_JSON_TPL_DECLARATION + friend class basic_json; + + public: + /*! + @brief create JSON pointer + + Create a JSON pointer according to the syntax described in + [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3). + + @param[in] s string representing the JSON pointer; if omitted, the empty + string is assumed which references the whole JSON value + + @throw parse_error.107 if the given JSON pointer @a s is nonempty and does + not begin with a slash (`/`); see example below + + @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is + not followed by `0` (representing `~`) or `1` (representing `/`); see + example below + + @liveexample{The example shows the construction several valid JSON pointers + as well as the exceptional behavior.,json_pointer} + + @since version 2.0.0 + */ + explicit json_pointer(const std::string& s = "") + : reference_tokens(split(s)) + {} + + /*! 
+ @brief return a string representation of the JSON pointer + + @invariant For each JSON pointer `ptr`, it holds: + @code {.cpp} + ptr == json_pointer(ptr.to_string()); + @endcode + + @return a string representation of the JSON pointer + + @liveexample{The example shows the result of `to_string`.,json_pointer__to_string} + + @since version 2.0.0 + */ + std::string to_string() const + { + return std::accumulate(reference_tokens.begin(), reference_tokens.end(), + std::string{}, + [](const std::string & a, const std::string & b) + { + return a + "/" + detail::escape(b); + }); + } + + /// @copydoc to_string() + operator std::string() const + { + return to_string(); + } + + /*! + @brief append another JSON pointer at the end of this JSON pointer + + @param[in] ptr JSON pointer to append + @return JSON pointer with @a ptr appended + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Linear in the length of @a ptr. + + @sa @ref operator/=(std::string) to append a reference token + @sa @ref operator/=(std::size_t) to append an array index + @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(const json_pointer& ptr) + { + reference_tokens.insert(reference_tokens.end(), + ptr.reference_tokens.begin(), + ptr.reference_tokens.end()); + return *this; + } + + /*! + @brief append an unescaped reference token at the end of this JSON pointer + + @param[in] token reference token to append + @return JSON pointer with @a token appended without escaping @a token + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Amortized constant. 
+ + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa @ref operator/=(std::size_t) to append an array index + @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(std::string token) + { + push_back(std::move(token)); + return *this; + } + + /*! + @brief append an array index at the end of this JSON pointer + + @param[in] array_idx array index to append + @return JSON pointer with @a array_idx appended + + @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} + + @complexity Amortized constant. + + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + @sa @ref operator/=(std::string) to append a reference token + @sa @ref operator/(const json_pointer&, std::string) for a binary operator + + @since version 3.6.0 + */ + json_pointer& operator/=(std::size_t array_idx) + { + return *this /= std::to_string(array_idx); + } + + /*! + @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer + + @param[in] lhs JSON pointer + @param[in] rhs JSON pointer + @return a new JSON pointer with @a rhs appended to @a lhs + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a lhs and @a rhs. + + @sa @ref operator/=(const json_pointer&) to append a JSON pointer + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& lhs, + const json_pointer& rhs) + { + return json_pointer(lhs) /= rhs; + } + + /*! + @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer + + @param[in] ptr JSON pointer + @param[in] token reference token + @return a new JSON pointer with unescaped @a token appended to @a ptr + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a ptr. 
+ + @sa @ref operator/=(std::string) to append a reference token + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& ptr, std::string token) + { + return json_pointer(ptr) /= std::move(token); + } + + /*! + @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer + + @param[in] ptr JSON pointer + @param[in] array_idx array index + @return a new JSON pointer with @a array_idx appended to @a ptr + + @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} + + @complexity Linear in the length of @a ptr. + + @sa @ref operator/=(std::size_t) to append an array index + + @since version 3.6.0 + */ + friend json_pointer operator/(const json_pointer& ptr, std::size_t array_idx) + { + return json_pointer(ptr) /= array_idx; + } + + /*! + @brief returns the parent of this JSON pointer + + @return parent of this JSON pointer; in case this JSON pointer is the root, + the root itself is returned + + @complexity Linear in the length of the JSON pointer. + + @liveexample{The example shows the result of `parent_pointer` for different + JSON Pointers.,json_pointer__parent_pointer} + + @since version 3.6.0 + */ + json_pointer parent_pointer() const + { + if (empty()) + { + return *this; + } + + json_pointer res = *this; + res.pop_back(); + return res; + } + + /*! + @brief remove last reference token + + @pre not `empty()` + + @liveexample{The example shows the usage of `pop_back`.,json_pointer__pop_back} + + @complexity Constant. + + @throw out_of_range.405 if JSON pointer has no parent + + @since version 3.6.0 + */ + void pop_back() + { + if (JSON_HEDLEY_UNLIKELY(empty())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", BasicJsonType())); + } + + reference_tokens.pop_back(); + } + + /*! 
+ @brief return last reference token + + @pre not `empty()` + @return last reference token + + @liveexample{The example shows the usage of `back`.,json_pointer__back} + + @complexity Constant. + + @throw out_of_range.405 if JSON pointer has no parent + + @since version 3.6.0 + */ + const std::string& back() const + { + if (JSON_HEDLEY_UNLIKELY(empty())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", BasicJsonType())); + } + + return reference_tokens.back(); + } + + /*! + @brief append an unescaped token at the end of the reference pointer + + @param[in] token token to add + + @complexity Amortized constant. + + @liveexample{The example shows the result of `push_back` for different + JSON Pointers.,json_pointer__push_back} + + @since version 3.6.0 + */ + void push_back(const std::string& token) + { + reference_tokens.push_back(token); + } + + /// @copydoc push_back(const std::string&) + void push_back(std::string&& token) + { + reference_tokens.push_back(std::move(token)); + } + + /*! + @brief return whether pointer points to the root document + + @return true iff the JSON pointer points to the root document + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example shows the result of `empty` for different JSON + Pointers.,json_pointer__empty} + + @since version 3.6.0 + */ + bool empty() const noexcept + { + return reference_tokens.empty(); + } + + private: + /*! 
+ @param[in] s reference token to be converted into an array index + + @return integer representation of @a s + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index begins not with a digit + @throw out_of_range.404 if string @a s could not be converted to an integer + @throw out_of_range.410 if an array index exceeds size_type + */ + static typename BasicJsonType::size_type array_index(const std::string& s) + { + using size_type = typename BasicJsonType::size_type; + + // error condition (cf. RFC 6901, Sect. 4) + if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && s[0] == '0')) + { + JSON_THROW(detail::parse_error::create(106, 0, "array index '" + s + "' must not begin with '0'", BasicJsonType())); + } + + // error condition (cf. RFC 6901, Sect. 4) + if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && !(s[0] >= '1' && s[0] <= '9'))) + { + JSON_THROW(detail::parse_error::create(109, 0, "array index '" + s + "' is not a number", BasicJsonType())); + } + + std::size_t processed_chars = 0; + unsigned long long res = 0; + JSON_TRY + { + res = std::stoull(s, &processed_chars); + } + JSON_CATCH(std::out_of_range&) + { + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'", BasicJsonType())); + } + + // check if the string was completely read + if (JSON_HEDLEY_UNLIKELY(processed_chars != s.size())) + { + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'", BasicJsonType())); + } + + // only triggered on special platforms (like 32bit), see also + // https://github.com/nlohmann/json/pull/2203 + if (res >= static_cast((std::numeric_limits::max)())) + { + JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type", BasicJsonType())); // LCOV_EXCL_LINE + } + + return static_cast(res); + } + + JSON_PRIVATE_UNLESS_TESTED: + json_pointer top() const + { + if (JSON_HEDLEY_UNLIKELY(empty())) + { + JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no 
parent", BasicJsonType())); + } + + json_pointer result = *this; + result.reference_tokens = {reference_tokens[0]}; + return result; + } + + private: + /*! + @brief create and return a reference to the pointed to value + + @complexity Linear in the number of reference tokens. + + @throw parse_error.109 if array index is not a number + @throw type_error.313 if value cannot be unflattened + */ + BasicJsonType& get_and_create(BasicJsonType& j) const + { + auto result = &j; + + // in case no reference tokens exist, return a reference to the JSON value + // j which will be overwritten by a primitive value + for (const auto& reference_token : reference_tokens) + { + switch (result->type()) + { + case detail::value_t::null: + { + if (reference_token == "0") + { + // start a new array if reference token is 0 + result = &result->operator[](0); + } + else + { + // start a new object otherwise + result = &result->operator[](reference_token); + } + break; + } + + case detail::value_t::object: + { + // create an entry in the object + result = &result->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + // create an entry in the array + result = &result->operator[](array_index(reference_token)); + break; + } + + /* + The following code is only reached if there exists a reference + token _and_ the current value is primitive. In this case, we have + an error situation, because primitive values may only occur as + single value; that is, with an empty list of reference tokens. + */ + default: + JSON_THROW(detail::type_error::create(313, "invalid value to unflatten", j)); + } + } + + return *result; + } + + /*! + @brief return a reference to the pointed to value + + @note This version does not throw if a value is not present, but tries to + create nested values instead. 
For instance, calling this function + with pointer `"/this/that"` on a null value is equivalent to calling + `operator[]("this").operator[]("that")` on that value, effectively + changing the null value to an object. + + @param[in] ptr a JSON value + + @return reference to the JSON value pointed to by the JSON pointer + + @complexity Linear in the length of the JSON pointer. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + BasicJsonType& get_unchecked(BasicJsonType* ptr) const + { + for (const auto& reference_token : reference_tokens) + { + // convert null values to arrays or objects before continuing + if (ptr->is_null()) + { + // check if reference token is a number + const bool nums = + std::all_of(reference_token.begin(), reference_token.end(), + [](const unsigned char x) + { + return std::isdigit(x); + }); + + // change value to array for numbers or "-" or to object otherwise + *ptr = (nums || reference_token == "-") + ? detail::value_t::array + : detail::value_t::object; + } + + switch (ptr->type()) + { + case detail::value_t::object: + { + // use unchecked object access + ptr = &ptr->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + if (reference_token == "-") + { + // explicitly treat "-" as index beyond the end + ptr = &ptr->operator[](ptr->m_value.array->size()); + } + else + { + // convert array index to number; unchecked access + ptr = &ptr->operator[](array_index(reference_token)); + } + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); + } + } + + return *ptr; + } + + /*! 
+ @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + BasicJsonType& get_checked(BasicJsonType* ptr) const + { + for (const auto& reference_token : reference_tokens) + { + switch (ptr->type()) + { + case detail::value_t::object: + { + // note: at performs range check + ptr = &ptr->at(reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) + { + // "-" always fails the range check + JSON_THROW(detail::out_of_range::create(402, + "array index '-' (" + std::to_string(ptr->m_value.array->size()) + + ") is out of range", *ptr)); + } + + // note: at performs range check + ptr = &ptr->at(array_index(reference_token)); + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); + } + } + + return *ptr; + } + + /*! 
+ @brief return a const reference to the pointed to value + + @param[in] ptr a JSON value + + @return const reference to the JSON value pointed to by the JSON + pointer + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const + { + for (const auto& reference_token : reference_tokens) + { + switch (ptr->type()) + { + case detail::value_t::object: + { + // use unchecked object access + ptr = &ptr->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) + { + // "-" cannot be used for const access + JSON_THROW(detail::out_of_range::create(402, "array index '-' (" + std::to_string(ptr->m_value.array->size()) + ") is out of range", *ptr)); + } + + // use unchecked array access + ptr = &ptr->operator[](array_index(reference_token)); + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); + } + } + + return *ptr; + } + + /*! 
+ @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + */ + const BasicJsonType& get_checked(const BasicJsonType* ptr) const + { + for (const auto& reference_token : reference_tokens) + { + switch (ptr->type()) + { + case detail::value_t::object: + { + // note: at performs range check + ptr = &ptr->at(reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) + { + // "-" always fails the range check + JSON_THROW(detail::out_of_range::create(402, + "array index '-' (" + std::to_string(ptr->m_value.array->size()) + + ") is out of range", *ptr)); + } + + // note: at performs range check + ptr = &ptr->at(array_index(reference_token)); + break; + } + + default: + JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); + } + } + + return *ptr; + } + + /*! 
+ @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + */ + bool contains(const BasicJsonType* ptr) const + { + for (const auto& reference_token : reference_tokens) + { + switch (ptr->type()) + { + case detail::value_t::object: + { + if (!ptr->contains(reference_token)) + { + // we did not find the key in the object + return false; + } + + ptr = &ptr->operator[](reference_token); + break; + } + + case detail::value_t::array: + { + if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) + { + // "-" always fails the range check + return false; + } + if (JSON_HEDLEY_UNLIKELY(reference_token.size() == 1 && !("0" <= reference_token && reference_token <= "9"))) + { + // invalid char + return false; + } + if (JSON_HEDLEY_UNLIKELY(reference_token.size() > 1)) + { + if (JSON_HEDLEY_UNLIKELY(!('1' <= reference_token[0] && reference_token[0] <= '9'))) + { + // first char should be between '1' and '9' + return false; + } + for (std::size_t i = 1; i < reference_token.size(); i++) + { + if (JSON_HEDLEY_UNLIKELY(!('0' <= reference_token[i] && reference_token[i] <= '9'))) + { + // other char should be between '0' and '9' + return false; + } + } + } + + const auto idx = array_index(reference_token); + if (idx >= ptr->size()) + { + // index out of range + return false; + } + + ptr = &ptr->operator[](idx); + break; + } + + default: + { + // we do not expect primitive values if there is still a + // reference token to process + return false; + } + } + } + + // no reference token left means we found a primitive value + return true; + } + + /*! + @brief split the string input to reference tokens + + @note This function is only called by the json_pointer constructor. + All exceptions below are documented there. 
+ + @throw parse_error.107 if the pointer is not empty or begins with '/' + @throw parse_error.108 if character '~' is not followed by '0' or '1' + */ + static std::vector split(const std::string& reference_string) + { + std::vector result; + + // special case: empty reference string -> no reference tokens + if (reference_string.empty()) + { + return result; + } + + // check if nonempty reference string begins with slash + if (JSON_HEDLEY_UNLIKELY(reference_string[0] != '/')) + { + JSON_THROW(detail::parse_error::create(107, 1, "JSON pointer must be empty or begin with '/' - was: '" + reference_string + "'", BasicJsonType())); + } + + // extract the reference tokens: + // - slash: position of the last read slash (or end of string) + // - start: position after the previous slash + for ( + // search for the first slash after the first character + std::size_t slash = reference_string.find_first_of('/', 1), + // set the beginning of the first reference token + start = 1; + // we can stop if start == 0 (if slash == std::string::npos) + start != 0; + // set the beginning of the next reference token + // (will eventually be 0 if slash == std::string::npos) + start = (slash == std::string::npos) ? 0 : slash + 1, + // find next slash + slash = reference_string.find_first_of('/', start)) + { + // use the text between the beginning of the reference token + // (start) and the last slash (slash). 
+ auto reference_token = reference_string.substr(start, slash - start); + + // check reference tokens are properly escaped + for (std::size_t pos = reference_token.find_first_of('~'); + pos != std::string::npos; + pos = reference_token.find_first_of('~', pos + 1)) + { + JSON_ASSERT(reference_token[pos] == '~'); + + // ~ must be followed by 0 or 1 + if (JSON_HEDLEY_UNLIKELY(pos == reference_token.size() - 1 || + (reference_token[pos + 1] != '0' && + reference_token[pos + 1] != '1'))) + { + JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'", BasicJsonType())); + } + } + + // finally, store the reference token + detail::unescape(reference_token); + result.push_back(reference_token); + } + + return result; + } + + private: + /*! + @param[in] reference_string the reference string to the current value + @param[in] value the value to consider + @param[in,out] result the result object to insert values to + + @note Empty objects or arrays are flattened to `null`. 
+ */ + static void flatten(const std::string& reference_string, + const BasicJsonType& value, + BasicJsonType& result) + { + switch (value.type()) + { + case detail::value_t::array: + { + if (value.m_value.array->empty()) + { + // flatten empty array as null + result[reference_string] = nullptr; + } + else + { + // iterate array and use index as reference string + for (std::size_t i = 0; i < value.m_value.array->size(); ++i) + { + flatten(reference_string + "/" + std::to_string(i), + value.m_value.array->operator[](i), result); + } + } + break; + } + + case detail::value_t::object: + { + if (value.m_value.object->empty()) + { + // flatten empty object as null + result[reference_string] = nullptr; + } + else + { + // iterate object and use keys as reference string + for (const auto& element : *value.m_value.object) + { + flatten(reference_string + "/" + detail::escape(element.first), element.second, result); + } + } + break; + } + + default: + { + // add primitive value with its reference string + result[reference_string] = value; + break; + } + } + } + + /*! 
+ @param[in] value flattened JSON + + @return unflattened JSON + + @throw parse_error.109 if array index is not a number + @throw type_error.314 if value is not an object + @throw type_error.315 if object values are not primitive + @throw type_error.313 if value cannot be unflattened + */ + static BasicJsonType + unflatten(const BasicJsonType& value) + { + if (JSON_HEDLEY_UNLIKELY(!value.is_object())) + { + JSON_THROW(detail::type_error::create(314, "only objects can be unflattened", value)); + } + + BasicJsonType result; + + // iterate the JSON object values + for (const auto& element : *value.m_value.object) + { + if (JSON_HEDLEY_UNLIKELY(!element.second.is_primitive())) + { + JSON_THROW(detail::type_error::create(315, "values in object must be primitive", element.second)); + } + + // assign value to reference pointed to by JSON pointer; Note that if + // the JSON pointer is "" (i.e., points to the whole value), function + // get_and_create returns a reference to result itself. An assignment + // will then create a primitive value. + json_pointer(element.first).get_and_create(result) = element.second; + } + + return result; + } + + /*! + @brief compares two JSON pointers for equality + + @param[in] lhs JSON pointer to compare + @param[in] rhs JSON pointer to compare + @return whether @a lhs is equal to @a rhs + + @complexity Linear in the length of the JSON pointer + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + */ + friend bool operator==(json_pointer const& lhs, + json_pointer const& rhs) noexcept + { + return lhs.reference_tokens == rhs.reference_tokens; + } + + /*! + @brief compares two JSON pointers for inequality + + @param[in] lhs JSON pointer to compare + @param[in] rhs JSON pointer to compare + @return whether @a lhs is not equal @a rhs + + @complexity Linear in the length of the JSON pointer + + @exceptionsafety No-throw guarantee: this function never throws exceptions. 
+ */ + friend bool operator!=(json_pointer const& lhs, + json_pointer const& rhs) noexcept + { + return !(lhs == rhs); + } + + /// the reference tokens + std::vector reference_tokens; +}; +} // namespace nlohmann + +// #include + + +#include +#include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +template +class json_ref +{ + public: + using value_type = BasicJsonType; + + json_ref(value_type&& value) + : owned_value(std::move(value)) + {} + + json_ref(const value_type& value) + : value_ref(&value) + {} + + json_ref(std::initializer_list init) + : owned_value(init) + {} + + template < + class... Args, + enable_if_t::value, int> = 0 > + json_ref(Args && ... args) + : owned_value(std::forward(args)...) + {} + + // class should be movable only + json_ref(json_ref&&) = default; + json_ref(const json_ref&) = delete; + json_ref& operator=(const json_ref&) = delete; + json_ref& operator=(json_ref&&) = delete; + ~json_ref() = default; + + value_type moved_or_copied() const + { + if (value_ref == nullptr) + { + return std::move(owned_value); + } + return *value_ref; + } + + value_type const& operator*() const + { + return value_ref ? 
*value_ref : owned_value; + } + + value_type const* operator->() const + { + return &** this; + } + + private: + mutable value_type owned_value = nullptr; + value_type const* value_ref = nullptr; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + + +#include // reverse +#include // array +#include // uint8_t, uint16_t, uint32_t, uint64_t +#include // memcpy +#include // numeric_limits +#include // string +#include // isnan, isinf + +// #include + +// #include + +// #include + + +#include // copy +#include // size_t +#include // streamsize +#include // back_inserter +#include // shared_ptr, make_shared +#include // basic_ostream +#include // basic_string +#include // vector +// #include + + +namespace nlohmann +{ +namespace detail +{ +/// abstract output adapter interface +template struct output_adapter_protocol +{ + virtual void write_character(CharType c) = 0; + virtual void write_characters(const CharType* s, std::size_t length) = 0; + virtual ~output_adapter_protocol() = default; +}; + +/// a type to simplify interfaces +template +using output_adapter_t = std::shared_ptr>; + +/// output adapter for byte vectors +template +class output_vector_adapter : public output_adapter_protocol +{ + public: + explicit output_vector_adapter(std::vector& vec) noexcept + : v(vec) + {} + + void write_character(CharType c) override + { + v.push_back(c); + } + + JSON_HEDLEY_NON_NULL(2) + void write_characters(const CharType* s, std::size_t length) override + { + std::copy(s, s + length, std::back_inserter(v)); + } + + private: + std::vector& v; +}; + +/// output adapter for output streams +template +class output_stream_adapter : public output_adapter_protocol +{ + public: + explicit output_stream_adapter(std::basic_ostream& s) noexcept + : stream(s) + {} + + void write_character(CharType c) override + { + stream.put(c); + } + + JSON_HEDLEY_NON_NULL(2) + void write_characters(const CharType* s, std::size_t length) 
override + { + stream.write(s, static_cast(length)); + } + + private: + std::basic_ostream& stream; +}; + +/// output adapter for basic_string +template> +class output_string_adapter : public output_adapter_protocol +{ + public: + explicit output_string_adapter(StringType& s) noexcept + : str(s) + {} + + void write_character(CharType c) override + { + str.push_back(c); + } + + JSON_HEDLEY_NON_NULL(2) + void write_characters(const CharType* s, std::size_t length) override + { + str.append(s, length); + } + + private: + StringType& str; +}; + +template> +class output_adapter +{ + public: + output_adapter(std::vector& vec) + : oa(std::make_shared>(vec)) {} + + output_adapter(std::basic_ostream& s) + : oa(std::make_shared>(s)) {} + + output_adapter(StringType& s) + : oa(std::make_shared>(s)) {} + + operator output_adapter_t() + { + return oa; + } + + private: + output_adapter_t oa = nullptr; +}; +} // namespace detail +} // namespace nlohmann + + +namespace nlohmann +{ +namespace detail +{ +/////////////////// +// binary writer // +/////////////////// + +/*! +@brief serialization to CBOR and MessagePack values +*/ +template +class binary_writer +{ + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using number_float_t = typename BasicJsonType::number_float_t; + + public: + /*! + @brief create a binary writer + + @param[in] adapter output adapter to write to + */ + explicit binary_writer(output_adapter_t adapter) : oa(adapter) + { + JSON_ASSERT(oa); + } + + /*! + @param[in] j JSON value to serialize + @pre j.type() == value_t::object + */ + void write_bson(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::object: + { + write_bson_object(*j.m_value.object); + break; + } + + default: + { + JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name()), j));; + } + } + } + + /*! 
+ @param[in] j JSON value to serialize + */ + void write_cbor(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: + { + oa->write_character(to_char_type(0xF6)); + break; + } + + case value_t::boolean: + { + oa->write_character(j.m_value.boolean + ? to_char_type(0xF5) + : to_char_type(0xF4)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // CBOR does not differentiate between positive signed + // integers and unsigned integers. Therefore, we used the + // code from the value_t::number_unsigned case here. + if (j.m_value.number_integer <= 0x17) + { + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x18)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x19)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x1A)); + write_number(static_cast(j.m_value.number_integer)); + } + else + { + oa->write_character(to_char_type(0x1B)); + write_number(static_cast(j.m_value.number_integer)); + } + } + else + { + // The conversions below encode the sign in the first + // byte, and the value is converted to a positive number. 
+ const auto positive_number = -1 - j.m_value.number_integer; + if (j.m_value.number_integer >= -24) + { + write_number(static_cast(0x20 + positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x38)); + write_number(static_cast(positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x39)); + write_number(static_cast(positive_number)); + } + else if (positive_number <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x3A)); + write_number(static_cast(positive_number)); + } + else + { + oa->write_character(to_char_type(0x3B)); + write_number(static_cast(positive_number)); + } + } + break; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned <= 0x17) + { + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x18)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x19)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x1A)); + write_number(static_cast(j.m_value.number_unsigned)); + } + else + { + oa->write_character(to_char_type(0x1B)); + write_number(static_cast(j.m_value.number_unsigned)); + } + break; + } + + case value_t::number_float: + { + if (std::isnan(j.m_value.number_float)) + { + // NaN is 0xf97e00 in CBOR + oa->write_character(to_char_type(0xF9)); + oa->write_character(to_char_type(0x7E)); + oa->write_character(to_char_type(0x00)); + } + else if (std::isinf(j.m_value.number_float)) + { + // Infinity is 0xf97c00, -Infinity is 0xf9fc00 + oa->write_character(to_char_type(0xf9)); + oa->write_character(j.m_value.number_float > 0 ? 
to_char_type(0x7C) : to_char_type(0xFC)); + oa->write_character(to_char_type(0x00)); + } + else + { + write_compact_float(j.m_value.number_float, detail::input_format_t::cbor); + } + break; + } + + case value_t::string: + { + // step 1: write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 0x17) + { + write_number(static_cast(0x60 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x78)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x79)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x7A)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x7B)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write the string + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 0x17) + { + write_number(static_cast(0x80 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x98)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x99)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x9A)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x9B)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_cbor(el); + } + break; + } + + case value_t::binary: + { + if 
(j.m_value.binary->has_subtype()) + { + write_number(static_cast(0xd8)); + write_number(j.m_value.binary->subtype()); + } + + // step 1: write control byte and the binary array size + const auto N = j.m_value.binary->size(); + if (N <= 0x17) + { + write_number(static_cast(0x40 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x58)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x59)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x5A)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0x5B)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + oa->write_characters( + reinterpret_cast(j.m_value.binary->data()), + N); + + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + const auto N = j.m_value.object->size(); + if (N <= 0x17) + { + write_number(static_cast(0xA0 + N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xB8)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xB9)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xBA)); + write_number(static_cast(N)); + } + // LCOV_EXCL_START + else if (N <= (std::numeric_limits::max)()) + { + oa->write_character(to_char_type(0xBB)); + write_number(static_cast(N)); + } + // LCOV_EXCL_STOP + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_cbor(el.first); + write_cbor(el.second); + } + break; + } + + default: + break; + } + } + + /*! 
+ @param[in] j JSON value to serialize + */ + void write_msgpack(const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::null: // nil + { + oa->write_character(to_char_type(0xC0)); + break; + } + + case value_t::boolean: // true and false + { + oa->write_character(j.m_value.boolean + ? to_char_type(0xC3) + : to_char_type(0xC2)); + break; + } + + case value_t::number_integer: + { + if (j.m_value.number_integer >= 0) + { + // MessagePack does not differentiate between positive + // signed integers and unsigned integers. Therefore, we used + // the code from the value_t::number_unsigned case here. + if (j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 8 + oa->write_character(to_char_type(0xCC)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 16 + oa->write_character(to_char_type(0xCD)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 32 + oa->write_character(to_char_type(0xCE)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 64 + oa->write_character(to_char_type(0xCF)); + write_number(static_cast(j.m_value.number_integer)); + } + } + else + { + if (j.m_value.number_integer >= -32) + { + // negative fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() && + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 8 + oa->write_character(to_char_type(0xD0)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() && + j.m_value.number_integer <= 
(std::numeric_limits::max)()) + { + // int 16 + oa->write_character(to_char_type(0xD1)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() && + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 32 + oa->write_character(to_char_type(0xD2)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_integer >= (std::numeric_limits::min)() && + j.m_value.number_integer <= (std::numeric_limits::max)()) + { + // int 64 + oa->write_character(to_char_type(0xD3)); + write_number(static_cast(j.m_value.number_integer)); + } + } + break; + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned < 128) + { + // positive fixnum + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 8 + oa->write_character(to_char_type(0xCC)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 16 + oa->write_character(to_char_type(0xCD)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 32 + oa->write_character(to_char_type(0xCE)); + write_number(static_cast(j.m_value.number_integer)); + } + else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) + { + // uint 64 + oa->write_character(to_char_type(0xCF)); + write_number(static_cast(j.m_value.number_integer)); + } + break; + } + + case value_t::number_float: + { + write_compact_float(j.m_value.number_float, detail::input_format_t::msgpack); + break; + } + + case value_t::string: + { + // step 1: write control byte and the string length + const auto N = j.m_value.string->size(); + if (N <= 31) + { + // fixstr + write_number(static_cast(0xA0 | N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 8 + 
oa->write_character(to_char_type(0xD9)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 16 + oa->write_character(to_char_type(0xDA)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // str 32 + oa->write_character(to_char_type(0xDB)); + write_number(static_cast(N)); + } + + // step 2: write the string + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + // step 1: write control byte and the array size + const auto N = j.m_value.array->size(); + if (N <= 15) + { + // fixarray + write_number(static_cast(0x90 | N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // array 16 + oa->write_character(to_char_type(0xDC)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // array 32 + oa->write_character(to_char_type(0xDD)); + write_number(static_cast(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.array) + { + write_msgpack(el); + } + break; + } + + case value_t::binary: + { + // step 0: determine if the binary type has a set subtype to + // determine whether or not to use the ext or fixext types + const bool use_ext = j.m_value.binary->has_subtype(); + + // step 1: write control byte and the byte string length + const auto N = j.m_value.binary->size(); + if (N <= (std::numeric_limits::max)()) + { + std::uint8_t output_type{}; + bool fixed = true; + if (use_ext) + { + switch (N) + { + case 1: + output_type = 0xD4; // fixext 1 + break; + case 2: + output_type = 0xD5; // fixext 2 + break; + case 4: + output_type = 0xD6; // fixext 4 + break; + case 8: + output_type = 0xD7; // fixext 8 + break; + case 16: + output_type = 0xD8; // fixext 16 + break; + default: + output_type = 0xC7; // ext 8 + fixed = false; + break; + } + + } + else + { + output_type = 0xC4; // bin 8 + fixed = false; + } + + 
oa->write_character(to_char_type(output_type)); + if (!fixed) + { + write_number(static_cast(N)); + } + } + else if (N <= (std::numeric_limits::max)()) + { + std::uint8_t output_type = use_ext + ? 0xC8 // ext 16 + : 0xC5; // bin 16 + + oa->write_character(to_char_type(output_type)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + std::uint8_t output_type = use_ext + ? 0xC9 // ext 32 + : 0xC6; // bin 32 + + oa->write_character(to_char_type(output_type)); + write_number(static_cast(N)); + } + + // step 1.5: if this is an ext type, write the subtype + if (use_ext) + { + write_number(static_cast(j.m_value.binary->subtype())); + } + + // step 2: write the byte string + oa->write_characters( + reinterpret_cast(j.m_value.binary->data()), + N); + + break; + } + + case value_t::object: + { + // step 1: write control byte and the object size + const auto N = j.m_value.object->size(); + if (N <= 15) + { + // fixmap + write_number(static_cast(0x80 | (N & 0xF))); + } + else if (N <= (std::numeric_limits::max)()) + { + // map 16 + oa->write_character(to_char_type(0xDE)); + write_number(static_cast(N)); + } + else if (N <= (std::numeric_limits::max)()) + { + // map 32 + oa->write_character(to_char_type(0xDF)); + write_number(static_cast(N)); + } + + // step 2: write each element + for (const auto& el : *j.m_value.object) + { + write_msgpack(el.first); + write_msgpack(el.second); + } + break; + } + + default: + break; + } + } + + /*! 
+ @param[in] j JSON value to serialize + @param[in] use_count whether to use '#' prefixes (optimized format) + @param[in] use_type whether to use '$' prefixes (optimized format) + @param[in] add_prefix whether prefixes need to be used for this value + */ + void write_ubjson(const BasicJsonType& j, const bool use_count, + const bool use_type, const bool add_prefix = true) + { + switch (j.type()) + { + case value_t::null: + { + if (add_prefix) + { + oa->write_character(to_char_type('Z')); + } + break; + } + + case value_t::boolean: + { + if (add_prefix) + { + oa->write_character(j.m_value.boolean + ? to_char_type('T') + : to_char_type('F')); + } + break; + } + + case value_t::number_integer: + { + write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix); + break; + } + + case value_t::number_unsigned: + { + write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix); + break; + } + + case value_t::number_float: + { + write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix); + break; + } + + case value_t::string: + { + if (add_prefix) + { + oa->write_character(to_char_type('S')); + } + write_number_with_ubjson_prefix(j.m_value.string->size(), true); + oa->write_characters( + reinterpret_cast(j.m_value.string->c_str()), + j.m_value.string->size()); + break; + } + + case value_t::array: + { + if (add_prefix) + { + oa->write_character(to_char_type('[')); + } + + bool prefix_required = true; + if (use_type && !j.m_value.array->empty()) + { + JSON_ASSERT(use_count); + const CharType first_prefix = ubjson_prefix(j.front()); + const bool same_prefix = std::all_of(j.begin() + 1, j.end(), + [this, first_prefix](const BasicJsonType & v) + { + return ubjson_prefix(v) == first_prefix; + }); + + if (same_prefix) + { + prefix_required = false; + oa->write_character(to_char_type('$')); + oa->write_character(first_prefix); + } + } + + if (use_count) + { + oa->write_character(to_char_type('#')); + 
write_number_with_ubjson_prefix(j.m_value.array->size(), true); + } + + for (const auto& el : *j.m_value.array) + { + write_ubjson(el, use_count, use_type, prefix_required); + } + + if (!use_count) + { + oa->write_character(to_char_type(']')); + } + + break; + } + + case value_t::binary: + { + if (add_prefix) + { + oa->write_character(to_char_type('[')); + } + + if (use_type && !j.m_value.binary->empty()) + { + JSON_ASSERT(use_count); + oa->write_character(to_char_type('$')); + oa->write_character('U'); + } + + if (use_count) + { + oa->write_character(to_char_type('#')); + write_number_with_ubjson_prefix(j.m_value.binary->size(), true); + } + + if (use_type) + { + oa->write_characters( + reinterpret_cast(j.m_value.binary->data()), + j.m_value.binary->size()); + } + else + { + for (size_t i = 0; i < j.m_value.binary->size(); ++i) + { + oa->write_character(to_char_type('U')); + oa->write_character(j.m_value.binary->data()[i]); + } + } + + if (!use_count) + { + oa->write_character(to_char_type(']')); + } + + break; + } + + case value_t::object: + { + if (add_prefix) + { + oa->write_character(to_char_type('{')); + } + + bool prefix_required = true; + if (use_type && !j.m_value.object->empty()) + { + JSON_ASSERT(use_count); + const CharType first_prefix = ubjson_prefix(j.front()); + const bool same_prefix = std::all_of(j.begin(), j.end(), + [this, first_prefix](const BasicJsonType & v) + { + return ubjson_prefix(v) == first_prefix; + }); + + if (same_prefix) + { + prefix_required = false; + oa->write_character(to_char_type('$')); + oa->write_character(first_prefix); + } + } + + if (use_count) + { + oa->write_character(to_char_type('#')); + write_number_with_ubjson_prefix(j.m_value.object->size(), true); + } + + for (const auto& el : *j.m_value.object) + { + write_number_with_ubjson_prefix(el.first.size(), true); + oa->write_characters( + reinterpret_cast(el.first.c_str()), + el.first.size()); + write_ubjson(el.second, use_count, use_type, prefix_required); + } + + if 
(!use_count) + { + oa->write_character(to_char_type('}')); + } + + break; + } + + default: + break; + } + } + + private: + ////////// + // BSON // + ////////// + + /*! + @return The size of a BSON document entry header, including the id marker + and the entry name size (and its null-terminator). + */ + static std::size_t calc_bson_entry_header_size(const string_t& name, const BasicJsonType& j) + { + const auto it = name.find(static_cast(0)); + if (JSON_HEDLEY_UNLIKELY(it != BasicJsonType::string_t::npos)) + { + JSON_THROW(out_of_range::create(409, "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")", j)); + } + + return /*id*/ 1ul + name.size() + /*zero-terminator*/1u; + } + + /*! + @brief Writes the given @a element_type and @a name to the output adapter + */ + void write_bson_entry_header(const string_t& name, + const std::uint8_t element_type) + { + oa->write_character(to_char_type(element_type)); // boolean + oa->write_characters( + reinterpret_cast(name.c_str()), + name.size() + 1u); + } + + /*! + @brief Writes a BSON element with key @a name and boolean value @a value + */ + void write_bson_boolean(const string_t& name, + const bool value) + { + write_bson_entry_header(name, 0x08); + oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00)); + } + + /*! + @brief Writes a BSON element with key @a name and double value @a value + */ + void write_bson_double(const string_t& name, + const double value) + { + write_bson_entry_header(name, 0x01); + write_number(value); + } + + /*! + @return The size of the BSON-encoded string in @a value + */ + static std::size_t calc_bson_string_size(const string_t& value) + { + return sizeof(std::int32_t) + value.size() + 1ul; + } + + /*! 
+ @brief Writes a BSON element with key @a name and string value @a value + */ + void write_bson_string(const string_t& name, + const string_t& value) + { + write_bson_entry_header(name, 0x02); + + write_number(static_cast(value.size() + 1ul)); + oa->write_characters( + reinterpret_cast(value.c_str()), + value.size() + 1); + } + + /*! + @brief Writes a BSON element with key @a name and null value + */ + void write_bson_null(const string_t& name) + { + write_bson_entry_header(name, 0x0A); + } + + /*! + @return The size of the BSON-encoded integer @a value + */ + static std::size_t calc_bson_integer_size(const std::int64_t value) + { + return (std::numeric_limits::min)() <= value && value <= (std::numeric_limits::max)() + ? sizeof(std::int32_t) + : sizeof(std::int64_t); + } + + /*! + @brief Writes a BSON element with key @a name and integer @a value + */ + void write_bson_integer(const string_t& name, + const std::int64_t value) + { + if ((std::numeric_limits::min)() <= value && value <= (std::numeric_limits::max)()) + { + write_bson_entry_header(name, 0x10); // int32 + write_number(static_cast(value)); + } + else + { + write_bson_entry_header(name, 0x12); // int64 + write_number(static_cast(value)); + } + } + + /*! + @return The size of the BSON-encoded unsigned integer in @a j + */ + static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept + { + return (value <= static_cast((std::numeric_limits::max)())) + ? sizeof(std::int32_t) + : sizeof(std::int64_t); + } + + /*! 
+ @brief Writes a BSON element with key @a name and unsigned @a value + */ + void write_bson_unsigned(const string_t& name, + const BasicJsonType& j) + { + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + write_bson_entry_header(name, 0x10 /* int32 */); + write_number(static_cast(j.m_value.number_unsigned)); + } + else if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + write_bson_entry_header(name, 0x12 /* int64 */); + write_number(static_cast(j.m_value.number_unsigned)); + } + else + { + JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(j.m_value.number_unsigned) + " cannot be represented by BSON as it does not fit int64", j)); + } + } + + /*! + @brief Writes a BSON element with key @a name and object @a value + */ + void write_bson_object_entry(const string_t& name, + const typename BasicJsonType::object_t& value) + { + write_bson_entry_header(name, 0x03); // object + write_bson_object(value); + } + + /*! + @return The size of the BSON-encoded array @a value + */ + static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value) + { + std::size_t array_index = 0ul; + + const std::size_t embedded_document_size = std::accumulate(std::begin(value), std::end(value), std::size_t(0), [&array_index](std::size_t result, const typename BasicJsonType::array_t::value_type & el) + { + return result + calc_bson_element_size(std::to_string(array_index++), el); + }); + + return sizeof(std::int32_t) + embedded_document_size + 1ul; + } + + /*! + @return The size of the BSON-encoded binary array @a value + */ + static std::size_t calc_bson_binary_size(const typename BasicJsonType::binary_t& value) + { + return sizeof(std::int32_t) + value.size() + 1ul; + } + + /*! 
+ @brief Writes a BSON element with key @a name and array @a value + */ + void write_bson_array(const string_t& name, + const typename BasicJsonType::array_t& value) + { + write_bson_entry_header(name, 0x04); // array + write_number(static_cast(calc_bson_array_size(value))); + + std::size_t array_index = 0ul; + + for (const auto& el : value) + { + write_bson_element(std::to_string(array_index++), el); + } + + oa->write_character(to_char_type(0x00)); + } + + /*! + @brief Writes a BSON element with key @a name and binary value @a value + */ + void write_bson_binary(const string_t& name, + const binary_t& value) + { + write_bson_entry_header(name, 0x05); + + write_number(static_cast(value.size())); + write_number(value.has_subtype() ? value.subtype() : std::uint8_t(0x00)); + + oa->write_characters(reinterpret_cast(value.data()), value.size()); + } + + /*! + @brief Calculates the size necessary to serialize the JSON value @a j with its @a name + @return The calculated size for the BSON document entry for @a j with the given @a name. 
+ */ + static std::size_t calc_bson_element_size(const string_t& name, + const BasicJsonType& j) + { + const auto header_size = calc_bson_entry_header_size(name, j); + switch (j.type()) + { + case value_t::object: + return header_size + calc_bson_object_size(*j.m_value.object); + + case value_t::array: + return header_size + calc_bson_array_size(*j.m_value.array); + + case value_t::binary: + return header_size + calc_bson_binary_size(*j.m_value.binary); + + case value_t::boolean: + return header_size + 1ul; + + case value_t::number_float: + return header_size + 8ul; + + case value_t::number_integer: + return header_size + calc_bson_integer_size(j.m_value.number_integer); + + case value_t::number_unsigned: + return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned); + + case value_t::string: + return header_size + calc_bson_string_size(*j.m_value.string); + + case value_t::null: + return header_size + 0ul; + + // LCOV_EXCL_START + default: + JSON_ASSERT(false); + return 0ul; + // LCOV_EXCL_STOP + } + } + + /*! + @brief Serializes the JSON value @a j to BSON and associates it with the + key @a name. 
+ @param name The name to associate with the JSON entity @a j within the + current BSON document + @return The size of the BSON entry + */ + void write_bson_element(const string_t& name, + const BasicJsonType& j) + { + switch (j.type()) + { + case value_t::object: + return write_bson_object_entry(name, *j.m_value.object); + + case value_t::array: + return write_bson_array(name, *j.m_value.array); + + case value_t::binary: + return write_bson_binary(name, *j.m_value.binary); + + case value_t::boolean: + return write_bson_boolean(name, j.m_value.boolean); + + case value_t::number_float: + return write_bson_double(name, j.m_value.number_float); + + case value_t::number_integer: + return write_bson_integer(name, j.m_value.number_integer); + + case value_t::number_unsigned: + return write_bson_unsigned(name, j); + + case value_t::string: + return write_bson_string(name, *j.m_value.string); + + case value_t::null: + return write_bson_null(name); + + // LCOV_EXCL_START + default: + JSON_ASSERT(false); + return; + // LCOV_EXCL_STOP + } + } + + /*! + @brief Calculates the size of the BSON serialization of the given + JSON-object @a j. + @param[in] j JSON value to serialize + @pre j.type() == value_t::object + */ + static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) + { + std::size_t document_size = std::accumulate(value.begin(), value.end(), std::size_t(0), + [](size_t result, const typename BasicJsonType::object_t::value_type & el) + { + return result += calc_bson_element_size(el.first, el.second); + }); + + return sizeof(std::int32_t) + document_size + 1ul; + } + + /*! 
+ @param[in] j JSON value to serialize + @pre j.type() == value_t::object + */ + void write_bson_object(const typename BasicJsonType::object_t& value) + { + write_number(static_cast(calc_bson_object_size(value))); + + for (const auto& el : value) + { + write_bson_element(el.first, el.second); + } + + oa->write_character(to_char_type(0x00)); + } + + ////////// + // CBOR // + ////////// + + static constexpr CharType get_cbor_float_prefix(float /*unused*/) + { + return to_char_type(0xFA); // Single-Precision Float + } + + static constexpr CharType get_cbor_float_prefix(double /*unused*/) + { + return to_char_type(0xFB); // Double-Precision Float + } + + ///////////// + // MsgPack // + ///////////// + + static constexpr CharType get_msgpack_float_prefix(float /*unused*/) + { + return to_char_type(0xCA); // float 32 + } + + static constexpr CharType get_msgpack_float_prefix(double /*unused*/) + { + return to_char_type(0xCB); // float 64 + } + + //////////// + // UBJSON // + //////////// + + // UBJSON: write number (floating point) + template::value, int>::type = 0> + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if (add_prefix) + { + oa->write_character(get_ubjson_float_prefix(n)); + } + write_number(n); + } + + // UBJSON: write number (unsigned integer) + template::value, int>::type = 0> + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('i')); // int8 + } + write_number(static_cast(n)); + } + else if (n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('U')); // uint8 + } + write_number(static_cast(n)); + } + else if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('I')); // int16 + } + write_number(static_cast(n)); + } + else if (n <= 
static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('l')); // int32 + } + write_number(static_cast(n)); + } + else if (n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('L')); // int64 + } + write_number(static_cast(n)); + } + else + { + if (add_prefix) + { + oa->write_character(to_char_type('H')); // high-precision number + } + + const auto number = BasicJsonType(n).dump(); + write_number_with_ubjson_prefix(number.size(), true); + for (std::size_t i = 0; i < number.size(); ++i) + { + oa->write_character(to_char_type(static_cast(number[i]))); + } + } + } + + // UBJSON: write number (signed integer) + template < typename NumberType, typename std::enable_if < + std::is_signed::value&& + !std::is_floating_point::value, int >::type = 0 > + void write_number_with_ubjson_prefix(const NumberType n, + const bool add_prefix) + { + if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('i')); // int8 + } + write_number(static_cast(n)); + } + else if (static_cast((std::numeric_limits::min)()) <= n && n <= static_cast((std::numeric_limits::max)())) + { + if (add_prefix) + { + oa->write_character(to_char_type('U')); // uint8 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('I')); // int16 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('l')); // int32 + } + write_number(static_cast(n)); + } + else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) + { + if (add_prefix) + { + oa->write_character(to_char_type('L')); // int64 + } + write_number(static_cast(n)); + } + // LCOV_EXCL_START + else + { + if 
(add_prefix) + { + oa->write_character(to_char_type('H')); // high-precision number + } + + const auto number = BasicJsonType(n).dump(); + write_number_with_ubjson_prefix(number.size(), true); + for (std::size_t i = 0; i < number.size(); ++i) + { + oa->write_character(to_char_type(static_cast(number[i]))); + } + } + // LCOV_EXCL_STOP + } + + /*! + @brief determine the type prefix of container values + */ + CharType ubjson_prefix(const BasicJsonType& j) const noexcept + { + switch (j.type()) + { + case value_t::null: + return 'Z'; + + case value_t::boolean: + return j.m_value.boolean ? 'T' : 'F'; + + case value_t::number_integer: + { + if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'i'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'U'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'I'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'l'; + } + if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) + { + return 'L'; + } + // anything else is treated as high-precision number + return 'H'; // LCOV_EXCL_LINE + } + + case value_t::number_unsigned: + { + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + return 'i'; + } + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + return 'U'; + } + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + return 'I'; + } + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + return 'l'; + } + if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) + { + return 'L'; + 
} + // anything else is treated as high-precision number + return 'H'; // LCOV_EXCL_LINE + } + + case value_t::number_float: + return get_ubjson_float_prefix(j.m_value.number_float); + + case value_t::string: + return 'S'; + + case value_t::array: // fallthrough + case value_t::binary: + return '['; + + case value_t::object: + return '{'; + + default: // discarded values + return 'N'; + } + } + + static constexpr CharType get_ubjson_float_prefix(float /*unused*/) + { + return 'd'; // float 32 + } + + static constexpr CharType get_ubjson_float_prefix(double /*unused*/) + { + return 'D'; // float 64 + } + + /////////////////////// + // Utility functions // + /////////////////////// + + /* + @brief write a number to output input + @param[in] n number of type @a NumberType + @tparam NumberType the type of the number + @tparam OutputIsLittleEndian Set to true if output data is + required to be little endian + + @note This function needs to respect the system's endianess, because bytes + in CBOR, MessagePack, and UBJSON are stored in network order (big + endian) and therefore need reordering on little endian systems. + */ + template + void write_number(const NumberType n) + { + // step 1: write number to array of length NumberType + std::array vec; + std::memcpy(vec.data(), &n, sizeof(NumberType)); + + // step 2: write array to output (with possible reordering) + if (is_little_endian != OutputIsLittleEndian) + { + // reverse byte order prior to conversion if necessary + std::reverse(vec.begin(), vec.end()); + } + + oa->write_characters(vec.data(), sizeof(NumberType)); + } + + void write_compact_float(const number_float_t n, detail::input_format_t format) + { + if (static_cast(n) >= static_cast(std::numeric_limits::lowest()) && + static_cast(n) <= static_cast((std::numeric_limits::max)()) && + static_cast(static_cast(n)) == static_cast(n)) + { + oa->write_character(format == detail::input_format_t::cbor + ? 
get_cbor_float_prefix(static_cast(n)) + : get_msgpack_float_prefix(static_cast(n))); + write_number(static_cast(n)); + } + else + { + oa->write_character(format == detail::input_format_t::cbor + ? get_cbor_float_prefix(n) + : get_msgpack_float_prefix(n)); + write_number(n); + } + } + + public: + // The following to_char_type functions are implement the conversion + // between uint8_t and CharType. In case CharType is not unsigned, + // such a conversion is required to allow values greater than 128. + // See for a discussion. + template < typename C = CharType, + enable_if_t < std::is_signed::value && std::is_signed::value > * = nullptr > + static constexpr CharType to_char_type(std::uint8_t x) noexcept + { + return *reinterpret_cast(&x); + } + + template < typename C = CharType, + enable_if_t < std::is_signed::value && std::is_unsigned::value > * = nullptr > + static CharType to_char_type(std::uint8_t x) noexcept + { + static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t"); + static_assert(std::is_trivial::value, "CharType must be trivial"); + CharType result; + std::memcpy(&result, &x, sizeof(x)); + return result; + } + + template::value>* = nullptr> + static constexpr CharType to_char_type(std::uint8_t x) noexcept + { + return x; + } + + template < typename InputCharType, typename C = CharType, + enable_if_t < + std::is_signed::value && + std::is_signed::value && + std::is_same::type>::value + > * = nullptr > + static constexpr CharType to_char_type(InputCharType x) noexcept + { + return x; + } + + private: + /// whether we can assume little endianess + const bool is_little_endian = little_endianess(); + + /// the output + output_adapter_t oa = nullptr; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + + +#include // reverse, remove, fill, find, none_of +#include // array +#include // localeconv, lconv +#include // labs, isfinite, isnan, signbit +#include // size_t, ptrdiff_t 
+#include // uint8_t +#include // snprintf +#include // numeric_limits +#include // string, char_traits +#include // is_same +#include // move + +// #include + + +#include // array +#include // signbit, isfinite +#include // intN_t, uintN_t +#include // memcpy, memmove +#include // numeric_limits +#include // conditional + +// #include + + +namespace nlohmann +{ +namespace detail +{ + +/*! +@brief implements the Grisu2 algorithm for binary to decimal floating-point +conversion. + +This implementation is a slightly modified version of the reference +implementation which may be obtained from +http://florian.loitsch.com/publications (bench.tar.gz). + +The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch. + +For a detailed description of the algorithm see: + +[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with + Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming + Language Design and Implementation, PLDI 2010 +[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately", + Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language + Design and Implementation, PLDI 1996 +*/ +namespace dtoa_impl +{ + +template +Target reinterpret_bits(const Source source) +{ + static_assert(sizeof(Target) == sizeof(Source), "size mismatch"); + + Target target; + std::memcpy(&target, &source, sizeof(Source)); + return target; +} + +struct diyfp // f * 2^e +{ + static constexpr int kPrecision = 64; // = q + + std::uint64_t f = 0; + int e = 0; + + constexpr diyfp(std::uint64_t f_, int e_) noexcept : f(f_), e(e_) {} + + /*! + @brief returns x - y + @pre x.e == y.e and x.f >= y.f + */ + static diyfp sub(const diyfp& x, const diyfp& y) noexcept + { + JSON_ASSERT(x.e == y.e); + JSON_ASSERT(x.f >= y.f); + + return {x.f - y.f, x.e}; + } + + /*! + @brief returns x * y + @note The result is rounded. (Only the upper q bits are returned.) 
+ */ + static diyfp mul(const diyfp& x, const diyfp& y) noexcept + { + static_assert(kPrecision == 64, "internal error"); + + // Computes: + // f = round((x.f * y.f) / 2^q) + // e = x.e + y.e + q + + // Emulate the 64-bit * 64-bit multiplication: + // + // p = u * v + // = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi) + // = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi ) + // = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 ) + // = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 ) + // = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3) + // = (p0_lo ) + 2^32 (Q ) + 2^64 (H ) + // = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H ) + // + // (Since Q might be larger than 2^32 - 1) + // + // = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H) + // + // (Q_hi + H does not overflow a 64-bit int) + // + // = p_lo + 2^64 p_hi + + const std::uint64_t u_lo = x.f & 0xFFFFFFFFu; + const std::uint64_t u_hi = x.f >> 32u; + const std::uint64_t v_lo = y.f & 0xFFFFFFFFu; + const std::uint64_t v_hi = y.f >> 32u; + + const std::uint64_t p0 = u_lo * v_lo; + const std::uint64_t p1 = u_lo * v_hi; + const std::uint64_t p2 = u_hi * v_lo; + const std::uint64_t p3 = u_hi * v_hi; + + const std::uint64_t p0_hi = p0 >> 32u; + const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu; + const std::uint64_t p1_hi = p1 >> 32u; + const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu; + const std::uint64_t p2_hi = p2 >> 32u; + + std::uint64_t Q = p0_hi + p1_lo + p2_lo; + + // The full product might now be computed as + // + // p_hi = p3 + p2_hi + p1_hi + (Q >> 32) + // p_lo = p0_lo + (Q << 32) + // + // But in this particular case here, the full p_lo is not required. + // Effectively we only need to add the highest bit in p_lo to p_hi (and + // Q_hi + 1 does not overflow). + + Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up + + const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u); + + return {h, x.e + y.e + 64}; + } + + /*! 
+ @brief normalize x such that the significand is >= 2^(q-1) + @pre x.f != 0 + */ + static diyfp normalize(diyfp x) noexcept + { + JSON_ASSERT(x.f != 0); + + while ((x.f >> 63u) == 0) + { + x.f <<= 1u; + x.e--; + } + + return x; + } + + /*! + @brief normalize x such that the result has the exponent E + @pre e >= x.e and the upper e - x.e bits of x.f must be zero. + */ + static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept + { + const int delta = x.e - target_exponent; + + JSON_ASSERT(delta >= 0); + JSON_ASSERT(((x.f << delta) >> delta) == x.f); + + return {x.f << delta, target_exponent}; + } +}; + +struct boundaries +{ + diyfp w; + diyfp minus; + diyfp plus; +}; + +/*! +Compute the (normalized) diyfp representing the input number 'value' and its +boundaries. + +@pre value must be finite and positive +*/ +template +boundaries compute_boundaries(FloatType value) +{ + JSON_ASSERT(std::isfinite(value)); + JSON_ASSERT(value > 0); + + // Convert the IEEE representation into a diyfp. + // + // If v is denormal: + // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1)) + // If v is normalized: + // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1)) + + static_assert(std::numeric_limits::is_iec559, + "internal error: dtoa_short requires an IEEE-754 floating-point implementation"); + + constexpr int kPrecision = std::numeric_limits::digits; // = p (includes the hidden bit) + constexpr int kBias = std::numeric_limits::max_exponent - 1 + (kPrecision - 1); + constexpr int kMinExp = 1 - kBias; + constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1) + + using bits_type = typename std::conditional::type; + + const std::uint64_t bits = reinterpret_bits(value); + const std::uint64_t E = bits >> (kPrecision - 1); + const std::uint64_t F = bits & (kHiddenBit - 1); + + const bool is_denormal = E == 0; + const diyfp v = is_denormal + ? 
diyfp(F, kMinExp) + : diyfp(F + kHiddenBit, static_cast(E) - kBias); + + // Compute the boundaries m- and m+ of the floating-point value + // v = f * 2^e. + // + // Determine v- and v+, the floating-point predecessor and successor if v, + // respectively. + // + // v- = v - 2^e if f != 2^(p-1) or e == e_min (A) + // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B) + // + // v+ = v + 2^e + // + // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_ + // between m- and m+ round to v, regardless of how the input rounding + // algorithm breaks ties. + // + // ---+-------------+-------------+-------------+-------------+--- (A) + // v- m- v m+ v+ + // + // -----------------+------+------+-------------+-------------+--- (B) + // v- m- v m+ v+ + + const bool lower_boundary_is_closer = F == 0 && E > 1; + const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); + const diyfp m_minus = lower_boundary_is_closer + ? diyfp(4 * v.f - 1, v.e - 2) // (B) + : diyfp(2 * v.f - 1, v.e - 1); // (A) + + // Determine the normalized w+ = m+. + const diyfp w_plus = diyfp::normalize(m_plus); + + // Determine w- = m- such that e_(w-) = e_(w+). + const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e); + + return {diyfp::normalize(v), w_minus, w_plus}; +} + +// Given normalized diyfp w, Grisu needs to find a (normalized) cached +// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies +// within a certain range [alpha, gamma] (Definition 3.2 from [1]) +// +// alpha <= e = e_c + e_w + q <= gamma +// +// or +// +// f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q +// <= f_c * f_w * 2^gamma +// +// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies +// +// 2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma +// +// or +// +// 2^(q - 2 + alpha) <= c * w < 2^(q + gamma) +// +// The choice of (alpha,gamma) determines the size of the table and the form of +// the digit generation procedure. 
Using (alpha,gamma)=(-60,-32) works out well +// in practice: +// +// The idea is to cut the number c * w = f * 2^e into two parts, which can be +// processed independently: An integral part p1, and a fractional part p2: +// +// f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e +// = (f div 2^-e) + (f mod 2^-e) * 2^e +// = p1 + p2 * 2^e +// +// The conversion of p1 into decimal form requires a series of divisions and +// modulos by (a power of) 10. These operations are faster for 32-bit than for +// 64-bit integers, so p1 should ideally fit into a 32-bit integer. This can be +// achieved by choosing +// +// -e >= 32 or e <= -32 := gamma +// +// In order to convert the fractional part +// +// p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ... +// +// into decimal form, the fraction is repeatedly multiplied by 10 and the digits +// d[-i] are extracted in order: +// +// (10 * p2) div 2^-e = d[-1] +// (10 * p2) mod 2^-e = d[-2] / 10^1 + ... +// +// The multiplication by 10 must not overflow. It is sufficient to choose +// +// 10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64. +// +// Since p2 = f mod 2^-e < 2^-e, +// +// -e <= 60 or e >= -60 := alpha + +constexpr int kAlpha = -60; +constexpr int kGamma = -32; + +struct cached_power // c = f * 2^e ~= 10^k +{ + std::uint64_t f; + int e; + int k; +}; + +/*! +For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached +power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c +satisfies (Definition 3.2 from [1]) + + alpha <= e_c + e + q <= gamma. +*/ +inline cached_power get_cached_power_for_binary_exponent(int e) +{ + // Now + // + // alpha <= e_c + e + q <= gamma (1) + // ==> f_c * 2^alpha <= c * 2^e * 2^q + // + // and since the c's are normalized, 2^(q-1) <= f_c, + // + // ==> 2^(q - 1 + alpha) <= c * 2^(e + q) + // ==> 2^(alpha - e - 1) <= c + // + // If c were an exact power of ten, i.e. 
c = 10^k, one may determine k as + // + // k = ceil( log_10( 2^(alpha - e - 1) ) ) + // = ceil( (alpha - e - 1) * log_10(2) ) + // + // From the paper: + // "In theory the result of the procedure could be wrong since c is rounded, + // and the computation itself is approximated [...]. In practice, however, + // this simple function is sufficient." + // + // For IEEE double precision floating-point numbers converted into + // normalized diyfp's w = f * 2^e, with q = 64, + // + // e >= -1022 (min IEEE exponent) + // -52 (p - 1) + // -52 (p - 1, possibly normalize denormal IEEE numbers) + // -11 (normalize the diyfp) + // = -1137 + // + // and + // + // e <= +1023 (max IEEE exponent) + // -52 (p - 1) + // -11 (normalize the diyfp) + // = 960 + // + // This binary exponent range [-1137,960] results in a decimal exponent + // range [-307,324]. One does not need to store a cached power for each + // k in this range. For each such k it suffices to find a cached power + // such that the exponent of the product lies in [alpha,gamma]. + // This implies that the difference of the decimal exponents of adjacent + // table entries must be less than or equal to + // + // floor( (gamma - alpha) * log_10(2) ) = 8. + // + // (A smaller distance gamma-alpha would require a larger table.) + + // NB: + // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34. 
+ + constexpr int kCachedPowersMinDecExp = -300; + constexpr int kCachedPowersDecStep = 8; + + static constexpr std::array kCachedPowers = + { + { + { 0xAB70FE17C79AC6CA, -1060, -300 }, + { 0xFF77B1FCBEBCDC4F, -1034, -292 }, + { 0xBE5691EF416BD60C, -1007, -284 }, + { 0x8DD01FAD907FFC3C, -980, -276 }, + { 0xD3515C2831559A83, -954, -268 }, + { 0x9D71AC8FADA6C9B5, -927, -260 }, + { 0xEA9C227723EE8BCB, -901, -252 }, + { 0xAECC49914078536D, -874, -244 }, + { 0x823C12795DB6CE57, -847, -236 }, + { 0xC21094364DFB5637, -821, -228 }, + { 0x9096EA6F3848984F, -794, -220 }, + { 0xD77485CB25823AC7, -768, -212 }, + { 0xA086CFCD97BF97F4, -741, -204 }, + { 0xEF340A98172AACE5, -715, -196 }, + { 0xB23867FB2A35B28E, -688, -188 }, + { 0x84C8D4DFD2C63F3B, -661, -180 }, + { 0xC5DD44271AD3CDBA, -635, -172 }, + { 0x936B9FCEBB25C996, -608, -164 }, + { 0xDBAC6C247D62A584, -582, -156 }, + { 0xA3AB66580D5FDAF6, -555, -148 }, + { 0xF3E2F893DEC3F126, -529, -140 }, + { 0xB5B5ADA8AAFF80B8, -502, -132 }, + { 0x87625F056C7C4A8B, -475, -124 }, + { 0xC9BCFF6034C13053, -449, -116 }, + { 0x964E858C91BA2655, -422, -108 }, + { 0xDFF9772470297EBD, -396, -100 }, + { 0xA6DFBD9FB8E5B88F, -369, -92 }, + { 0xF8A95FCF88747D94, -343, -84 }, + { 0xB94470938FA89BCF, -316, -76 }, + { 0x8A08F0F8BF0F156B, -289, -68 }, + { 0xCDB02555653131B6, -263, -60 }, + { 0x993FE2C6D07B7FAC, -236, -52 }, + { 0xE45C10C42A2B3B06, -210, -44 }, + { 0xAA242499697392D3, -183, -36 }, + { 0xFD87B5F28300CA0E, -157, -28 }, + { 0xBCE5086492111AEB, -130, -20 }, + { 0x8CBCCC096F5088CC, -103, -12 }, + { 0xD1B71758E219652C, -77, -4 }, + { 0x9C40000000000000, -50, 4 }, + { 0xE8D4A51000000000, -24, 12 }, + { 0xAD78EBC5AC620000, 3, 20 }, + { 0x813F3978F8940984, 30, 28 }, + { 0xC097CE7BC90715B3, 56, 36 }, + { 0x8F7E32CE7BEA5C70, 83, 44 }, + { 0xD5D238A4ABE98068, 109, 52 }, + { 0x9F4F2726179A2245, 136, 60 }, + { 0xED63A231D4C4FB27, 162, 68 }, + { 0xB0DE65388CC8ADA8, 189, 76 }, + { 0x83C7088E1AAB65DB, 216, 84 }, + { 0xC45D1DF942711D9A, 242, 92 }, + { 
0x924D692CA61BE758, 269, 100 }, + { 0xDA01EE641A708DEA, 295, 108 }, + { 0xA26DA3999AEF774A, 322, 116 }, + { 0xF209787BB47D6B85, 348, 124 }, + { 0xB454E4A179DD1877, 375, 132 }, + { 0x865B86925B9BC5C2, 402, 140 }, + { 0xC83553C5C8965D3D, 428, 148 }, + { 0x952AB45CFA97A0B3, 455, 156 }, + { 0xDE469FBD99A05FE3, 481, 164 }, + { 0xA59BC234DB398C25, 508, 172 }, + { 0xF6C69A72A3989F5C, 534, 180 }, + { 0xB7DCBF5354E9BECE, 561, 188 }, + { 0x88FCF317F22241E2, 588, 196 }, + { 0xCC20CE9BD35C78A5, 614, 204 }, + { 0x98165AF37B2153DF, 641, 212 }, + { 0xE2A0B5DC971F303A, 667, 220 }, + { 0xA8D9D1535CE3B396, 694, 228 }, + { 0xFB9B7CD9A4A7443C, 720, 236 }, + { 0xBB764C4CA7A44410, 747, 244 }, + { 0x8BAB8EEFB6409C1A, 774, 252 }, + { 0xD01FEF10A657842C, 800, 260 }, + { 0x9B10A4E5E9913129, 827, 268 }, + { 0xE7109BFBA19C0C9D, 853, 276 }, + { 0xAC2820D9623BF429, 880, 284 }, + { 0x80444B5E7AA7CF85, 907, 292 }, + { 0xBF21E44003ACDD2D, 933, 300 }, + { 0x8E679C2F5E44FF8F, 960, 308 }, + { 0xD433179D9C8CB841, 986, 316 }, + { 0x9E19DB92B4E31BA9, 1013, 324 }, + } + }; + + // This computation gives exactly the same results for k as + // k = ceil((kAlpha - e - 1) * 0.30102999566398114) + // for |e| <= 1500, but doesn't require floating-point operations. + // NB: log_10(2) ~= 78913 / 2^18 + JSON_ASSERT(e >= -1500); + JSON_ASSERT(e <= 1500); + const int f = kAlpha - e - 1; + const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); + + const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; + JSON_ASSERT(index >= 0); + JSON_ASSERT(static_cast(index) < kCachedPowers.size()); + + const cached_power cached = kCachedPowers[static_cast(index)]; + JSON_ASSERT(kAlpha <= cached.e + e + 64); + JSON_ASSERT(kGamma >= cached.e + e + 64); + + return cached; +} + +/*! +For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k. +For n == 0, returns 1 and sets pow10 := 1. 
*/
// Descending threshold ladder: returns the decimal digit count of n and sets
// pow10 = 10^(count-1) using at most ten comparisons and no division loop.
inline int find_largest_pow10(const std::uint32_t n, std::uint32_t& pow10)
{
    // LCOV_EXCL_START
    if (n >= 1000000000)
    {
        pow10 = 1000000000;
        return 10;
    }
    // LCOV_EXCL_STOP
    if (n >= 100000000)
    {
        pow10 = 100000000;
        return 9;
    }
    if (n >= 10000000)
    {
        pow10 = 10000000;
        return 8;
    }
    if (n >= 1000000)
    {
        pow10 = 1000000;
        return 7;
    }
    if (n >= 100000)
    {
        pow10 = 100000;
        return 6;
    }
    if (n >= 10000)
    {
        pow10 = 10000;
        return 5;
    }
    if (n >= 1000)
    {
        pow10 = 1000;
        return 4;
    }
    if (n >= 100)
    {
        pow10 = 100;
        return 3;
    }
    if (n >= 10)
    {
        pow10 = 10;
        return 2;
    }

    // n in [0, 9]: one digit (n == 0 also reports length 1).
    pow10 = 1;
    return 1;
}

/*!
@brief round the digit string in @a buf towards w

Decrements the last generated digit (one unit @a ten_k at a time) as long as
the value stays inside the interval of width @a delta and each step moves it
strictly closer to w; @a rest is the current distance from the upper bound M+,
@a dist the distance of w from M+.
*/
inline void grisu2_round(char* buf, int len, std::uint64_t dist, std::uint64_t delta,
                         std::uint64_t rest, std::uint64_t ten_k)
{
    JSON_ASSERT(len >= 1);
    JSON_ASSERT(dist <= delta);
    JSON_ASSERT(rest <= delta);
    JSON_ASSERT(ten_k > 0);

    //               <--------------------------- delta ---->
    //                                  <---- dist --------->
    // --------------[------------------+-------------------]--------------
    //               M-                 w                   M+
    //
    //                                  ten_k
    //                                <------>
    //                                       <---- rest ---->
    // --------------[------------------+----+--------------]--------------
    //                                  w    V
    //                                       = buf * 10^k
    //
    // ten_k represents a unit-in-the-last-place in the decimal representation
    // stored in buf.
    // Decrement buf by ten_k while this takes buf closer to w.

    // The tests are written in this order to avoid overflow in unsigned
    // integer arithmetic.

    while (rest < dist
            && delta - rest >= ten_k
            && (rest + ten_k < dist || dist - rest > rest + ten_k - dist))
    {
        JSON_ASSERT(buf[len - 1] != '0');
        buf[len - 1]--;
        rest += ten_k;
    }
}

/*!
Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+.
M- and M+ must be normalized and share the same exponent -60 <= e <= -32.
+*/ +inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, + diyfp M_minus, diyfp w, diyfp M_plus) +{ + static_assert(kAlpha >= -60, "internal error"); + static_assert(kGamma <= -32, "internal error"); + + // Generates the digits (and the exponent) of a decimal floating-point + // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's + // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma. + // + // <--------------------------- delta ----> + // <---- dist ---------> + // --------------[------------------+-------------------]-------------- + // M- w M+ + // + // Grisu2 generates the digits of M+ from left to right and stops as soon as + // V is in [M-,M+]. + + JSON_ASSERT(M_plus.e >= kAlpha); + JSON_ASSERT(M_plus.e <= kGamma); + + std::uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e) + std::uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e) + + // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0): + // + // M+ = f * 2^e + // = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e + // = ((p1 ) * 2^-e + (p2 )) * 2^e + // = p1 + p2 * 2^e + + const diyfp one(std::uint64_t{1} << -M_plus.e, M_plus.e); + + auto p1 = static_cast(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.) 
+ std::uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e + + // 1) + // + // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0] + + JSON_ASSERT(p1 > 0); + + std::uint32_t pow10; + const int k = find_largest_pow10(p1, pow10); + + // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) + // + // p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1)) + // = (d[k-1] ) * 10^(k-1) + (p1 mod 10^(k-1)) + // + // M+ = p1 + p2 * 2^e + // = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1)) + p2 * 2^e + // = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e + // = d[k-1] * 10^(k-1) + ( rest) * 2^e + // + // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0) + // + // p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0] + // + // but stop as soon as + // + // rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e + + int n = k; + while (n > 0) + { + // Invariants: + // M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k) + // pow10 = 10^(n-1) <= p1 < 10^n + // + const std::uint32_t d = p1 / pow10; // d = p1 div 10^(n-1) + const std::uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1) + // + // M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e + // = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e) + // + JSON_ASSERT(d <= 9); + buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d + // + // M+ = buffer * 10^(n-1) + (r + p2 * 2^e) + // + p1 = r; + n--; + // + // M+ = buffer * 10^n + (p1 + p2 * 2^e) + // pow10 = 10^n + // + + // Now check if enough digits have been generated. + // Compute + // + // p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e + // + // Note: + // Since rest and delta share the same exponent e, it suffices to + // compare the significands. + const std::uint64_t rest = (std::uint64_t{p1} << -one.e) + p2; + if (rest <= delta) + { + // V = buffer * 10^n, with M- <= V <= M+. + + decimal_exponent += n; + + // We may now just stop. But instead look if the buffer could be + // decremented to bring V closer to w. 
+ // + // pow10 = 10^n is now 1 ulp in the decimal representation V. + // The rounding procedure works with diyfp's with an implicit + // exponent of e. + // + // 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e + // + const std::uint64_t ten_n = std::uint64_t{pow10} << -one.e; + grisu2_round(buffer, length, dist, delta, rest, ten_n); + + return; + } + + pow10 /= 10; + // + // pow10 = 10^(n-1) <= p1 < 10^n + // Invariants restored. + } + + // 2) + // + // The digits of the integral part have been generated: + // + // M+ = d[k-1]...d[1]d[0] + p2 * 2^e + // = buffer + p2 * 2^e + // + // Now generate the digits of the fractional part p2 * 2^e. + // + // Note: + // No decimal point is generated: the exponent is adjusted instead. + // + // p2 actually represents the fraction + // + // p2 * 2^e + // = p2 / 2^-e + // = d[-1] / 10^1 + d[-2] / 10^2 + ... + // + // Now generate the digits d[-m] of p1 from left to right (m = 1,2,...) + // + // p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m + // + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...) + // + // using + // + // 10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e) + // = ( d) * 2^-e + ( r) + // + // or + // 10^m * p2 * 2^e = d + r * 2^e + // + // i.e. + // + // M+ = buffer + p2 * 2^e + // = buffer + 10^-m * (d + r * 2^e) + // = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e + // + // and stop as soon as 10^-m * r * 2^e <= delta * 2^e + + JSON_ASSERT(p2 > delta); + + int m = 0; + for (;;) + { + // Invariant: + // M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) 
* 2^e + // = buffer * 10^-m + 10^-m * (p2 ) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e + // + JSON_ASSERT(p2 <= (std::numeric_limits::max)() / 10); + p2 *= 10; + const std::uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e + const std::uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e + // + // M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e + // = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e)) + // = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e + // + JSON_ASSERT(d <= 9); + buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d + // + // M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e + // + p2 = r; + m++; + // + // M+ = buffer * 10^-m + 10^-m * p2 * 2^e + // Invariant restored. + + // Check if enough digits have been generated. + // + // 10^-m * p2 * 2^e <= delta * 2^e + // p2 * 2^e <= 10^m * delta * 2^e + // p2 <= 10^m * delta + delta *= 10; + dist *= 10; + if (p2 <= delta) + { + break; + } + } + + // V = buffer * 10^-m, with M- <= V <= M+. + + decimal_exponent -= m; + + // 1 ulp in the decimal representation is now 10^-m. + // Since delta and dist are now scaled by 10^m, we need to do the + // same with ulp in order to keep the units in sync. + // + // 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e + // + const std::uint64_t ten_m = one.f; + grisu2_round(buffer, length, dist, delta, p2, ten_m); + + // By construction this algorithm generates the shortest possible decimal + // number (Loitsch, Theorem 6.2) which rounds back to w. + // For an input number of precision p, at least + // + // N = 1 + ceil(p * log_10(2)) + // + // decimal digits are sufficient to identify all binary floating-point + // numbers (Matula, "In-and-Out conversions"). + // This implies that the algorithm does not produce more than N decimal + // digits. 
+ // + // N = 17 for p = 53 (IEEE double precision) + // N = 9 for p = 24 (IEEE single precision) +} + +/*! +v = buf * 10^decimal_exponent +len is the length of the buffer (number of decimal digits) +The buffer must be large enough, i.e. >= max_digits10. +*/ +JSON_HEDLEY_NON_NULL(1) +inline void grisu2(char* buf, int& len, int& decimal_exponent, + diyfp m_minus, diyfp v, diyfp m_plus) +{ + JSON_ASSERT(m_plus.e == m_minus.e); + JSON_ASSERT(m_plus.e == v.e); + + // --------(-----------------------+-----------------------)-------- (A) + // m- v m+ + // + // --------------------(-----------+-----------------------)-------- (B) + // m- v m+ + // + // First scale v (and m- and m+) such that the exponent is in the range + // [alpha, gamma]. + + const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e); + + const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k + + // The exponent of the products is = v.e + c_minus_k.e + q and is in the range [alpha,gamma] + const diyfp w = diyfp::mul(v, c_minus_k); + const diyfp w_minus = diyfp::mul(m_minus, c_minus_k); + const diyfp w_plus = diyfp::mul(m_plus, c_minus_k); + + // ----(---+---)---------------(---+---)---------------(---+---)---- + // w- w w+ + // = c*m- = c*v = c*m+ + // + // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and + // w+ are now off by a small amount. + // In fact: + // + // w - v * 10^k < 1 ulp + // + // To account for this inaccuracy, add resp. subtract 1 ulp. + // + // --------+---[---------------(---+---)---------------]---+-------- + // w- M- w M+ w+ + // + // Now any number in [M-, M+] (bounds included) will round to w when input, + // regardless of how the input rounding algorithm breaks ties. + // + // And digit_gen generates the shortest possible such number in [M-, M+]. + // Note that this does not mean that Grisu2 always generates the shortest + // possible number in the interval (m-, m+). 
+ const diyfp M_minus(w_minus.f + 1, w_minus.e); + const diyfp M_plus (w_plus.f - 1, w_plus.e ); + + decimal_exponent = -cached.k; // = -(-k) = k + + grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus); +} + +/*! +v = buf * 10^decimal_exponent +len is the length of the buffer (number of decimal digits) +The buffer must be large enough, i.e. >= max_digits10. +*/ +template +JSON_HEDLEY_NON_NULL(1) +void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value) +{ + static_assert(diyfp::kPrecision >= std::numeric_limits::digits + 3, + "internal error: not enough precision"); + + JSON_ASSERT(std::isfinite(value)); + JSON_ASSERT(value > 0); + + // If the neighbors (and boundaries) of 'value' are always computed for double-precision + // numbers, all float's can be recovered using strtod (and strtof). However, the resulting + // decimal representations are not exactly "short". + // + // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars) + // says "value is converted to a string as if by std::sprintf in the default ("C") locale" + // and since sprintf promotes float's to double's, I think this is exactly what 'std::to_chars' + // does. + // On the other hand, the documentation for 'std::to_chars' requires that "parsing the + // representation using the corresponding std::from_chars function recovers value exactly". That + // indicates that single precision floating-point numbers should be recovered using + // 'std::strtof'. + // + // NB: If the neighbors are computed for single-precision numbers, there is a single float + // (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision + // value is off by 1 ulp. +#if 0 + const boundaries w = compute_boundaries(static_cast(value)); +#else + const boundaries w = compute_boundaries(value); +#endif + + grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus); +} + +/*! 
+@brief appends a decimal representation of e to buf +@return a pointer to the element following the exponent. +@pre -1000 < e < 1000 +*/ +JSON_HEDLEY_NON_NULL(1) +JSON_HEDLEY_RETURNS_NON_NULL +inline char* append_exponent(char* buf, int e) +{ + JSON_ASSERT(e > -1000); + JSON_ASSERT(e < 1000); + + if (e < 0) + { + e = -e; + *buf++ = '-'; + } + else + { + *buf++ = '+'; + } + + auto k = static_cast(e); + if (k < 10) + { + // Always print at least two digits in the exponent. + // This is for compatibility with printf("%g"). + *buf++ = '0'; + *buf++ = static_cast('0' + k); + } + else if (k < 100) + { + *buf++ = static_cast('0' + k / 10); + k %= 10; + *buf++ = static_cast('0' + k); + } + else + { + *buf++ = static_cast('0' + k / 100); + k %= 100; + *buf++ = static_cast('0' + k / 10); + k %= 10; + *buf++ = static_cast('0' + k); + } + + return buf; +} + +/*! +@brief prettify v = buf * 10^decimal_exponent + +If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point +notation. Otherwise it will be printed in exponential notation. + +@pre min_exp < 0 +@pre max_exp > 0 +*/ +JSON_HEDLEY_NON_NULL(1) +JSON_HEDLEY_RETURNS_NON_NULL +inline char* format_buffer(char* buf, int len, int decimal_exponent, + int min_exp, int max_exp) +{ + JSON_ASSERT(min_exp < 0); + JSON_ASSERT(max_exp > 0); + + const int k = len; + const int n = len + decimal_exponent; + + // v = buf * 10^(n-k) + // k is the length of the buffer (number of decimal digits) + // n is the position of the decimal point relative to the start of the buffer. 
+ + if (k <= n && n <= max_exp) + { + // digits[000] + // len <= max_exp + 2 + + std::memset(buf + k, '0', static_cast(n) - static_cast(k)); + // Make it look like a floating-point number (#362, #378) + buf[n + 0] = '.'; + buf[n + 1] = '0'; + return buf + (static_cast(n) + 2); + } + + if (0 < n && n <= max_exp) + { + // dig.its + // len <= max_digits10 + 1 + + JSON_ASSERT(k > n); + + std::memmove(buf + (static_cast(n) + 1), buf + n, static_cast(k) - static_cast(n)); + buf[n] = '.'; + return buf + (static_cast(k) + 1U); + } + + if (min_exp < n && n <= 0) + { + // 0.[000]digits + // len <= 2 + (-min_exp - 1) + max_digits10 + + std::memmove(buf + (2 + static_cast(-n)), buf, static_cast(k)); + buf[0] = '0'; + buf[1] = '.'; + std::memset(buf + 2, '0', static_cast(-n)); + return buf + (2U + static_cast(-n) + static_cast(k)); + } + + if (k == 1) + { + // dE+123 + // len <= 1 + 5 + + buf += 1; + } + else + { + // d.igitsE+123 + // len <= max_digits10 + 1 + 5 + + std::memmove(buf + 2, buf + 1, static_cast(k) - 1); + buf[1] = '.'; + buf += 1 + static_cast(k); + } + + *buf++ = 'e'; + return append_exponent(buf, n - 1); +} + +} // namespace dtoa_impl + +/*! +@brief generates a decimal representation of the floating-point number value in [first, last). + +The format of the resulting decimal representation is similar to printf's %g +format. Returns an iterator pointing past-the-end of the decimal representation. + +@note The input number must be finite, i.e. NaN's and Inf's are not supported. +@note The buffer must be large enough. +@note The result is NOT null-terminated. +*/ +template +JSON_HEDLEY_NON_NULL(1, 2) +JSON_HEDLEY_RETURNS_NON_NULL +char* to_chars(char* first, const char* last, FloatType value) +{ + static_cast(last); // maybe unused - fix warning + JSON_ASSERT(std::isfinite(value)); + + // Use signbit(value) instead of (value < 0) since signbit works for -0. 
+ if (std::signbit(value)) + { + value = -value; + *first++ = '-'; + } + + if (value == 0) // +-0 + { + *first++ = '0'; + // Make it look like a floating-point number (#362, #378) + *first++ = '.'; + *first++ = '0'; + return first; + } + + JSON_ASSERT(last - first >= std::numeric_limits::max_digits10); + + // Compute v = buffer * 10^decimal_exponent. + // The decimal digits are stored in the buffer, which needs to be interpreted + // as an unsigned decimal integer. + // len is the length of the buffer, i.e. the number of decimal digits. + int len = 0; + int decimal_exponent = 0; + dtoa_impl::grisu2(first, len, decimal_exponent, value); + + JSON_ASSERT(len <= std::numeric_limits::max_digits10); + + // Format the buffer like printf("%.*g", prec, value) + constexpr int kMinExp = -4; + // Use digits10 here to increase compatibility with version 2. + constexpr int kMaxExp = std::numeric_limits::digits10; + + JSON_ASSERT(last - first >= kMaxExp + 2); + JSON_ASSERT(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits::max_digits10); + JSON_ASSERT(last - first >= std::numeric_limits::max_digits10 + 6); + + return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp); +} + +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + +// #include + +// #include + +// #include + + +namespace nlohmann +{ +namespace detail +{ +/////////////////// +// serialization // +/////////////////// + +/// how to treat decoding errors +enum class error_handler_t +{ + strict, ///< throw a type_error exception in case of invalid UTF-8 + replace, ///< replace invalid UTF-8 sequences with U+FFFD + ignore ///< ignore invalid UTF-8 sequences +}; + +template +class serializer +{ + using string_t = typename BasicJsonType::string_t; + using number_float_t = typename BasicJsonType::number_float_t; + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using 
binary_char_t = typename BasicJsonType::binary_t::value_type; + static constexpr std::uint8_t UTF8_ACCEPT = 0; + static constexpr std::uint8_t UTF8_REJECT = 1; + + public: + /*! + @param[in] s output stream to serialize to + @param[in] ichar indentation character to use + @param[in] error_handler_ how to react on decoding errors + */ + serializer(output_adapter_t s, const char ichar, + error_handler_t error_handler_ = error_handler_t::strict) + : o(std::move(s)) + , loc(std::localeconv()) + , thousands_sep(loc->thousands_sep == nullptr ? '\0' : std::char_traits::to_char_type(* (loc->thousands_sep))) + , decimal_point(loc->decimal_point == nullptr ? '\0' : std::char_traits::to_char_type(* (loc->decimal_point))) + , indent_char(ichar) + , indent_string(512, indent_char) + , error_handler(error_handler_) + {} + + // delete because of pointer members + serializer(const serializer&) = delete; + serializer& operator=(const serializer&) = delete; + serializer(serializer&&) = delete; + serializer& operator=(serializer&&) = delete; + ~serializer() = default; + + /*! + @brief internal implementation of the serialization function + + This function is called by the public member function dump and organizes + the serialization internally. The indentation level is propagated as + additional parameter. In case of arrays and objects, the function is + called recursively. + + - strings and object keys are escaped using `escape_string()` + - integer numbers are converted implicitly via `operator<<` + - floating-point numbers are converted to a string using `"%g"` format + - binary values are serialized as objects containing the subtype and the + byte array + + @param[in] val value to serialize + @param[in] pretty_print whether the output shall be pretty-printed + @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters + in the output are escaped with `\uXXXX` sequences, and the result consists + of ASCII characters only. 
+ @param[in] indent_step the indent level + @param[in] current_indent the current indent level (only used internally) + */ + void dump(const BasicJsonType& val, + const bool pretty_print, + const bool ensure_ascii, + const unsigned int indent_step, + const unsigned int current_indent = 0) + { + switch (val.m_type) + { + case value_t::object: + { + if (val.m_value.object->empty()) + { + o->write_characters("{}", 2); + return; + } + + if (pretty_print) + { + o->write_characters("{\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + JSON_ASSERT(i != val.m_value.object->cend()); + JSON_ASSERT(std::next(i) == val.m_value.object->cend()); + o->write_characters(indent_string.c_str(), new_indent); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\": ", 3); + dump(i->second, true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character('}'); + } + else + { + o->write_character('{'); + + // first n-1 elements + auto i = val.m_value.object->cbegin(); + for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) + { + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + 
} + + // last element + JSON_ASSERT(i != val.m_value.object->cend()); + JSON_ASSERT(std::next(i) == val.m_value.object->cend()); + o->write_character('\"'); + dump_escaped(i->first, ensure_ascii); + o->write_characters("\":", 2); + dump(i->second, false, ensure_ascii, indent_step, current_indent); + + o->write_character('}'); + } + + return; + } + + case value_t::array: + { + if (val.m_value.array->empty()) + { + o->write_characters("[]", 2); + return; + } + + if (pretty_print) + { + o->write_characters("[\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + o->write_characters(indent_string.c_str(), new_indent); + dump(*i, true, ensure_ascii, indent_step, new_indent); + o->write_characters(",\n", 2); + } + + // last element + JSON_ASSERT(!val.m_value.array->empty()); + o->write_characters(indent_string.c_str(), new_indent); + dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent); + + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character(']'); + } + else + { + o->write_character('['); + + // first n-1 elements + for (auto i = val.m_value.array->cbegin(); + i != val.m_value.array->cend() - 1; ++i) + { + dump(*i, false, ensure_ascii, indent_step, current_indent); + o->write_character(','); + } + + // last element + JSON_ASSERT(!val.m_value.array->empty()); + dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent); + + o->write_character(']'); + } + + return; + } + + case value_t::string: + { + o->write_character('\"'); + dump_escaped(*val.m_value.string, ensure_ascii); + o->write_character('\"'); + return; + } + + case value_t::binary: + { + if 
(pretty_print) + { + o->write_characters("{\n", 2); + + // variable to hold indentation for recursive calls + const auto new_indent = current_indent + indent_step; + if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) + { + indent_string.resize(indent_string.size() * 2, ' '); + } + + o->write_characters(indent_string.c_str(), new_indent); + + o->write_characters("\"bytes\": [", 10); + + if (!val.m_value.binary->empty()) + { + for (auto i = val.m_value.binary->cbegin(); + i != val.m_value.binary->cend() - 1; ++i) + { + dump_integer(*i); + o->write_characters(", ", 2); + } + dump_integer(val.m_value.binary->back()); + } + + o->write_characters("],\n", 3); + o->write_characters(indent_string.c_str(), new_indent); + + o->write_characters("\"subtype\": ", 11); + if (val.m_value.binary->has_subtype()) + { + dump_integer(val.m_value.binary->subtype()); + } + else + { + o->write_characters("null", 4); + } + o->write_character('\n'); + o->write_characters(indent_string.c_str(), current_indent); + o->write_character('}'); + } + else + { + o->write_characters("{\"bytes\":[", 10); + + if (!val.m_value.binary->empty()) + { + for (auto i = val.m_value.binary->cbegin(); + i != val.m_value.binary->cend() - 1; ++i) + { + dump_integer(*i); + o->write_character(','); + } + dump_integer(val.m_value.binary->back()); + } + + o->write_characters("],\"subtype\":", 12); + if (val.m_value.binary->has_subtype()) + { + dump_integer(val.m_value.binary->subtype()); + o->write_character('}'); + } + else + { + o->write_characters("null}", 5); + } + } + return; + } + + case value_t::boolean: + { + if (val.m_value.boolean) + { + o->write_characters("true", 4); + } + else + { + o->write_characters("false", 5); + } + return; + } + + case value_t::number_integer: + { + dump_integer(val.m_value.number_integer); + return; + } + + case value_t::number_unsigned: + { + dump_integer(val.m_value.number_unsigned); + return; + } + + case value_t::number_float: + { + 
dump_float(val.m_value.number_float); + return; + } + + case value_t::discarded: + { + o->write_characters("", 11); + return; + } + + case value_t::null: + { + o->write_characters("null", 4); + return; + } + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + } + + JSON_PRIVATE_UNLESS_TESTED: + /*! + @brief dump escaped string + + Escape a string by replacing certain special characters by a sequence of an + escape character (backslash) and another character and other control + characters by a sequence of "\u" followed by a four-digit hex + representation. The escaped string is written to output stream @a o. + + @param[in] s the string to escape + @param[in] ensure_ascii whether to escape non-ASCII characters with + \uXXXX sequences + + @complexity Linear in the length of string @a s. + */ + void dump_escaped(const string_t& s, const bool ensure_ascii) + { + std::uint32_t codepoint; + std::uint8_t state = UTF8_ACCEPT; + std::size_t bytes = 0; // number of bytes written to string_buffer + + // number of bytes written at the point of the last valid byte + std::size_t bytes_after_last_accept = 0; + std::size_t undumped_chars = 0; + + for (std::size_t i = 0; i < s.size(); ++i) + { + const auto byte = static_cast(s[i]); + + switch (decode(state, codepoint, byte)) + { + case UTF8_ACCEPT: // decode found a new code point + { + switch (codepoint) + { + case 0x08: // backspace + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'b'; + break; + } + + case 0x09: // horizontal tab + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 't'; + break; + } + + case 0x0A: // newline + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'n'; + break; + } + + case 0x0C: // formfeed + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'f'; + break; + } + + case 0x0D: // carriage return + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'r'; + break; + } + + case 0x22: // quotation mark + { + 
string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = '\"'; + break; + } + + case 0x5C: // reverse solidus + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = '\\'; + break; + } + + default: + { + // escape control characters (0x00..0x1F) or, if + // ensure_ascii parameter is used, non-ASCII characters + if ((codepoint <= 0x1F) || (ensure_ascii && (codepoint >= 0x7F))) + { + if (codepoint <= 0xFFFF) + { + (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", + static_cast(codepoint)); + bytes += 6; + } + else + { + (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", + static_cast(0xD7C0u + (codepoint >> 10u)), + static_cast(0xDC00u + (codepoint & 0x3FFu))); + bytes += 12; + } + } + else + { + // copy byte to buffer (all previous bytes + // been copied have in default case above) + string_buffer[bytes++] = s[i]; + } + break; + } + } + + // write buffer and reset index; there must be 13 bytes + // left, as this is the maximal number of bytes to be + // written ("\uxxxx\uxxxx\0") for one code point + if (string_buffer.size() - bytes < 13) + { + o->write_characters(string_buffer.data(), bytes); + bytes = 0; + } + + // remember the byte position of this accept + bytes_after_last_accept = bytes; + undumped_chars = 0; + break; + } + + case UTF8_REJECT: // decode found invalid UTF-8 byte + { + switch (error_handler) + { + case error_handler_t::strict: + { + std::string sn(3, '\0'); + (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); + JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn, BasicJsonType())); + } + + case error_handler_t::ignore: + case error_handler_t::replace: + { + // in case we saw this character the first time, we + // would like to read it again, because the byte + // may be OK for itself, but just not OK for the + // previous sequence + if (undumped_chars > 0) + { + --i; + } + + // reset length buffer to the last accepted index; + // thus removing/ignoring the invalid 
characters + bytes = bytes_after_last_accept; + + if (error_handler == error_handler_t::replace) + { + // add a replacement character + if (ensure_ascii) + { + string_buffer[bytes++] = '\\'; + string_buffer[bytes++] = 'u'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'f'; + string_buffer[bytes++] = 'd'; + } + else + { + string_buffer[bytes++] = detail::binary_writer::to_char_type('\xEF'); + string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBF'); + string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBD'); + } + + // write buffer and reset index; there must be 13 bytes + // left, as this is the maximal number of bytes to be + // written ("\uxxxx\uxxxx\0") for one code point + if (string_buffer.size() - bytes < 13) + { + o->write_characters(string_buffer.data(), bytes); + bytes = 0; + } + + bytes_after_last_accept = bytes; + } + + undumped_chars = 0; + + // continue processing the string + state = UTF8_ACCEPT; + break; + } + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + break; + } + + default: // decode found yet incomplete multi-byte code point + { + if (!ensure_ascii) + { + // code point will not be escaped - copy byte to buffer + string_buffer[bytes++] = s[i]; + } + ++undumped_chars; + break; + } + } + } + + // we finished processing the string + if (JSON_HEDLEY_LIKELY(state == UTF8_ACCEPT)) + { + // write buffer + if (bytes > 0) + { + o->write_characters(string_buffer.data(), bytes); + } + } + else + { + // we finish reading, but do not accept: string was incomplete + switch (error_handler) + { + case error_handler_t::strict: + { + std::string sn(3, '\0'); + (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); + JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn, BasicJsonType())); + } + + case error_handler_t::ignore: + { + // write all accepted bytes + o->write_characters(string_buffer.data(), bytes_after_last_accept); 
+ break; + } + + case error_handler_t::replace: + { + // write all accepted bytes + o->write_characters(string_buffer.data(), bytes_after_last_accept); + // add a replacement character + if (ensure_ascii) + { + o->write_characters("\\ufffd", 6); + } + else + { + o->write_characters("\xEF\xBF\xBD", 3); + } + break; + } + + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + } + } + + private: + /*! + @brief count digits + + Count the number of decimal (base 10) digits for an input unsigned integer. + + @param[in] x unsigned integer number to count its digits + @return number of decimal digits + */ + inline unsigned int count_digits(number_unsigned_t x) noexcept + { + unsigned int n_digits = 1; + for (;;) + { + if (x < 10) + { + return n_digits; + } + if (x < 100) + { + return n_digits + 1; + } + if (x < 1000) + { + return n_digits + 2; + } + if (x < 10000) + { + return n_digits + 3; + } + x = x / 10000u; + n_digits += 4; + } + } + + /*! + @brief dump an integer + + Dump a given integer to output stream @a o. Works internally with + @a number_buffer. 
+ + @param[in] x integer number (signed or unsigned) to dump + @tparam NumberType either @a number_integer_t or @a number_unsigned_t + */ + template < typename NumberType, detail::enable_if_t < + std::is_same::value || + std::is_same::value || + std::is_same::value, + int > = 0 > + void dump_integer(NumberType x) + { + static constexpr std::array, 100> digits_to_99 + { + { + {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}}, + {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}}, + {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}}, + {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}}, + {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}}, + {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}}, + {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}}, + {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}}, + {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}}, + {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}}, + } + }; + + // special case for "0" + if (x == 0) + { + o->write_character('0'); + return; + } + + // use a pointer to fill the buffer + auto buffer_ptr = number_buffer.begin(); + + const bool 
is_negative = std::is_same::value && !(x >= 0); // see issue #755 + number_unsigned_t abs_value; + + unsigned int n_chars; + + if (is_negative) + { + *buffer_ptr = '-'; + abs_value = remove_sign(static_cast(x)); + + // account one more byte for the minus sign + n_chars = 1 + count_digits(abs_value); + } + else + { + abs_value = static_cast(x); + n_chars = count_digits(abs_value); + } + + // spare 1 byte for '\0' + JSON_ASSERT(n_chars < number_buffer.size() - 1); + + // jump to the end to generate the string from backward + // so we later avoid reversing the result + buffer_ptr += n_chars; + + // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu + // See: https://www.youtube.com/watch?v=o4-CwDo2zpg + while (abs_value >= 100) + { + const auto digits_index = static_cast((abs_value % 100)); + abs_value /= 100; + *(--buffer_ptr) = digits_to_99[digits_index][1]; + *(--buffer_ptr) = digits_to_99[digits_index][0]; + } + + if (abs_value >= 10) + { + const auto digits_index = static_cast(abs_value); + *(--buffer_ptr) = digits_to_99[digits_index][1]; + *(--buffer_ptr) = digits_to_99[digits_index][0]; + } + else + { + *(--buffer_ptr) = static_cast('0' + abs_value); + } + + o->write_characters(number_buffer.data(), n_chars); + } + + /*! + @brief dump a floating-point number + + Dump a given floating-point number to output stream @a o. Works internally + with @a number_buffer. + + @param[in] x floating-point number to dump + */ + void dump_float(number_float_t x) + { + // NaN / inf + if (!std::isfinite(x)) + { + o->write_characters("null", 4); + return; + } + + // If number_float_t is an IEEE-754 single or double precision number, + // use the Grisu2 algorithm to produce short numbers which are + // guaranteed to round-trip, using strtof and strtod, resp. + // + // NB: The test below works if == . 
+ static constexpr bool is_ieee_single_or_double + = (std::numeric_limits::is_iec559 && std::numeric_limits::digits == 24 && std::numeric_limits::max_exponent == 128) || + (std::numeric_limits::is_iec559 && std::numeric_limits::digits == 53 && std::numeric_limits::max_exponent == 1024); + + dump_float(x, std::integral_constant()); + } + + void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) + { + char* begin = number_buffer.data(); + char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); + + o->write_characters(begin, static_cast(end - begin)); + } + + void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/) + { + // get number of digits for a float -> text -> float round-trip + static constexpr auto d = std::numeric_limits::max_digits10; + + // the actual conversion + std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); + + // negative value indicates an error + JSON_ASSERT(len > 0); + // check if buffer was large enough + JSON_ASSERT(static_cast(len) < number_buffer.size()); + + // erase thousands separator + if (thousands_sep != '\0') + { + const auto end = std::remove(number_buffer.begin(), + number_buffer.begin() + len, thousands_sep); + std::fill(end, number_buffer.end(), '\0'); + JSON_ASSERT((end - number_buffer.begin()) <= len); + len = (end - number_buffer.begin()); + } + + // convert decimal point to '.' + if (decimal_point != '\0' && decimal_point != '.') + { + const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); + if (dec_pos != number_buffer.end()) + { + *dec_pos = '.'; + } + } + + o->write_characters(number_buffer.data(), static_cast(len)); + + // determine if need to append ".0" + const bool value_is_int_like = + std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1, + [](char c) + { + return c == '.' 
|| c == 'e'; + }); + + if (value_is_int_like) + { + o->write_characters(".0", 2); + } + } + + /*! + @brief check whether a string is UTF-8 encoded + + The function checks each byte of a string whether it is UTF-8 encoded. The + result of the check is stored in the @a state parameter. The function must + be called initially with state 0 (accept). State 1 means the string must + be rejected, because the current byte is not allowed. If the string is + completely processed, but the state is non-zero, the string ended + prematurely; that is, the last byte indicated more bytes should have + followed. + + @param[in,out] state the state of the decoding + @param[in,out] codep codepoint (valid only if resulting state is UTF8_ACCEPT) + @param[in] byte next byte to decode + @return new state + + @note The function has been edited: a std::array is used. + + @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann + @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + */ + static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept + { + static const std::array utf8d = + { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF + 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF + 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 
0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 + } + }; + + JSON_ASSERT(byte < utf8d.size()); + const std::uint8_t type = utf8d[byte]; + + codep = (state != UTF8_ACCEPT) + ? (byte & 0x3fu) | (codep << 6u) + : (0xFFu >> type) & (byte); + + std::size_t index = 256u + static_cast(state) * 16u + static_cast(type); + JSON_ASSERT(index < 400); + state = utf8d[index]; + return state; + } + + /* + * Overload to make the compiler happy while it is instantiating + * dump_integer for number_unsigned_t. + * Must never be called. + */ + number_unsigned_t remove_sign(number_unsigned_t x) + { + JSON_ASSERT(false); // LCOV_EXCL_LINE + return x; // LCOV_EXCL_LINE + } + + /* + * Helper function for dump_integer + * + * This function takes a negative signed integer and returns its absolute + * value as unsigned integer. The plus/minus shuffling is necessary as we can + * not directly remove the sign of an arbitrary signed integer as the + * absolute values of INT_MIN and INT_MAX are usually not the same. See + * #1708 for details. 
+ */ + inline number_unsigned_t remove_sign(number_integer_t x) noexcept + { + JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); + return static_cast(-(x + 1)) + 1; + } + + private: + /// the output of the serializer + output_adapter_t o = nullptr; + + /// a (hopefully) large enough character buffer + std::array number_buffer{{}}; + + /// the locale + const std::lconv* loc = nullptr; + /// the locale's thousand separator character + const char thousands_sep = '\0'; + /// the locale's decimal point character + const char decimal_point = '\0'; + + /// string buffer + std::array string_buffer{{}}; + + /// the indentation character + const char indent_char; + /// the indentation string + string_t indent_string; + + /// error_handler how to react on decoding errors + const error_handler_t error_handler; +}; +} // namespace detail +} // namespace nlohmann + +// #include + +// #include + +// #include + + +#include // less +#include // allocator +#include // pair +#include // vector + +// #include + + +namespace nlohmann +{ + +/// ordered_map: a minimal map-like container that preserves insertion order +/// for use within nlohmann::basic_json +template , + class Allocator = std::allocator>> + struct ordered_map : std::vector, Allocator> +{ + using key_type = Key; + using mapped_type = T; + using Container = std::vector, Allocator>; + using typename Container::iterator; + using typename Container::const_iterator; + using typename Container::size_type; + using typename Container::value_type; + + // Explicit constructors instead of `using Container::Container` + // otherwise older compilers choke on it (GCC <= 5.5, xcode <= 9.4) + ordered_map(const Allocator& alloc = Allocator()) : Container{alloc} {} + template + ordered_map(It first, It last, const Allocator& alloc = Allocator()) + : Container{first, last, alloc} {} + ordered_map(std::initializer_list init, const Allocator& alloc = Allocator() ) + : Container{init, alloc} {} + + std::pair emplace(const key_type& key, 
T&& t) + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + return {it, false}; + } + } + Container::emplace_back(key, t); + return {--this->end(), true}; + } + + T& operator[](const Key& key) + { + return emplace(key, T{}).first->second; + } + + const T& operator[](const Key& key) const + { + return at(key); + } + + T& at(const Key& key) + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + return it->second; + } + } + + JSON_THROW(std::out_of_range("key not found")); + } + + const T& at(const Key& key) const + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + return it->second; + } + } + + JSON_THROW(std::out_of_range("key not found")); + } + + size_type erase(const Key& key) + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + // Since we cannot move const Keys, re-construct them in place + for (auto next = it; ++next != this->end(); ++it) + { + it->~value_type(); // Destroy but keep allocation + new (&*it) value_type{std::move(*next)}; + } + Container::pop_back(); + return 1; + } + } + return 0; + } + + iterator erase(iterator pos) + { + auto it = pos; + + // Since we cannot move const Keys, re-construct them in place + for (auto next = it; ++next != this->end(); ++it) + { + it->~value_type(); // Destroy but keep allocation + new (&*it) value_type{std::move(*next)}; + } + Container::pop_back(); + return pos; + } + + size_type count(const Key& key) const + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + return 1; + } + } + return 0; + } + + iterator find(const Key& key) + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + return it; + } + } + return Container::end(); + } + + const_iterator find(const Key& key) const + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == key) + { + 
return it; + } + } + return Container::end(); + } + + std::pair insert( value_type&& value ) + { + return emplace(value.first, std::move(value.second)); + } + + std::pair insert( const value_type& value ) + { + for (auto it = this->begin(); it != this->end(); ++it) + { + if (it->first == value.first) + { + return {it, false}; + } + } + Container::push_back(value); + return {--this->end(), true}; + } + + template + using require_input_iter = typename std::enable_if::iterator_category, + std::input_iterator_tag>::value>::type; + + template> + void insert(InputIt first, InputIt last) + { + for (auto it = first; it != last; ++it) + { + insert(*it); + } + } +}; + +} // namespace nlohmann + + +#if defined(JSON_HAS_CPP_17) + #include +#endif + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +namespace nlohmann +{ + +/*! +@brief a class to store JSON values + +@tparam ObjectType type for JSON objects (`std::map` by default; will be used +in @ref object_t) +@tparam ArrayType type for JSON arrays (`std::vector` by default; will be used +in @ref array_t) +@tparam StringType type for JSON strings and object keys (`std::string` by +default; will be used in @ref string_t) +@tparam BooleanType type for JSON booleans (`bool` by default; will be used +in @ref boolean_t) +@tparam NumberIntegerType type for JSON integer numbers (`int64_t` by +default; will be used in @ref number_integer_t) +@tparam NumberUnsignedType type for JSON unsigned integer numbers (@c +`uint64_t` by default; will be used in @ref number_unsigned_t) +@tparam NumberFloatType type for JSON floating-point numbers (`double` by +default; will be used in @ref number_float_t) +@tparam BinaryType type for packed binary data for compatibility with binary +serialization formats (`std::vector` by default; will be used in +@ref binary_t) +@tparam AllocatorType type of the allocator to use (`std::allocator` by +default) +@tparam JSONSerializer the serializer to resolve 
internal calls to `to_json()` +and `from_json()` (@ref adl_serializer by default) + +@requirement The class satisfies the following concept requirements: +- Basic + - [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible): + JSON values can be default constructed. The result will be a JSON null + value. + - [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible): + A JSON value can be constructed from an rvalue argument. + - [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible): + A JSON value can be copy-constructed from an lvalue expression. + - [MoveAssignable](https://en.cppreference.com/w/cpp/named_req/MoveAssignable): + A JSON value van be assigned from an rvalue argument. + - [CopyAssignable](https://en.cppreference.com/w/cpp/named_req/CopyAssignable): + A JSON value can be copy-assigned from an lvalue expression. + - [Destructible](https://en.cppreference.com/w/cpp/named_req/Destructible): + JSON values can be destructed. +- Layout + - [StandardLayoutType](https://en.cppreference.com/w/cpp/named_req/StandardLayoutType): + JSON values have + [standard layout](https://en.cppreference.com/w/cpp/language/data_members#Standard_layout): + All non-static data members are private and standard layout types, the + class has no virtual functions or (virtual) base classes. +- Library-wide + - [EqualityComparable](https://en.cppreference.com/w/cpp/named_req/EqualityComparable): + JSON values can be compared with `==`, see @ref + operator==(const_reference,const_reference). + - [LessThanComparable](https://en.cppreference.com/w/cpp/named_req/LessThanComparable): + JSON values can be compared with `<`, see @ref + operator<(const_reference,const_reference). + - [Swappable](https://en.cppreference.com/w/cpp/named_req/Swappable): + Any JSON lvalue or rvalue of can be swapped with any lvalue or rvalue of + other compatible types, using unqualified function call @ref swap(). 
+ - [NullablePointer](https://en.cppreference.com/w/cpp/named_req/NullablePointer): + JSON values can be compared against `std::nullptr_t` objects which are used + to model the `null` value. +- Container + - [Container](https://en.cppreference.com/w/cpp/named_req/Container): + JSON values can be used like STL containers and provide iterator access. + - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer); + JSON values can be used like STL containers and provide reverse iterator + access. + +@invariant The member variables @a m_value and @a m_type have the following +relationship: +- If `m_type == value_t::object`, then `m_value.object != nullptr`. +- If `m_type == value_t::array`, then `m_value.array != nullptr`. +- If `m_type == value_t::string`, then `m_value.string != nullptr`. +The invariants are checked by member function assert_invariant(). + +@internal +@note ObjectType trick from https://stackoverflow.com/a/9860911 +@endinternal + +@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange +Format](http://rfc7159.net/rfc7159) + +@since version 1.0.0 + +@nosubgrouping +*/ +NLOHMANN_BASIC_JSON_TPL_DECLARATION +class basic_json +{ + private: + template friend struct detail::external_constructor; + friend ::nlohmann::json_pointer; + + template + friend class ::nlohmann::detail::parser; + friend ::nlohmann::detail::serializer; + template + friend class ::nlohmann::detail::iter_impl; + template + friend class ::nlohmann::detail::binary_writer; + template + friend class ::nlohmann::detail::binary_reader; + template + friend class ::nlohmann::detail::json_sax_dom_parser; + template + friend class ::nlohmann::detail::json_sax_dom_callback_parser; + friend class ::nlohmann::detail::exception; + + /// workaround type for MSVC + using basic_json_t = NLOHMANN_BASIC_JSON_TPL; + + JSON_PRIVATE_UNLESS_TESTED: + // convenience aliases for types residing in namespace detail; + using lexer = ::nlohmann::detail::lexer_base; + + 
template + static ::nlohmann::detail::parser parser( + InputAdapterType adapter, + detail::parser_callback_tcb = nullptr, + const bool allow_exceptions = true, + const bool ignore_comments = false + ) + { + return ::nlohmann::detail::parser(std::move(adapter), + std::move(cb), allow_exceptions, ignore_comments); + } + + private: + using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t; + template + using internal_iterator = ::nlohmann::detail::internal_iterator; + template + using iter_impl = ::nlohmann::detail::iter_impl; + template + using iteration_proxy = ::nlohmann::detail::iteration_proxy; + template using json_reverse_iterator = ::nlohmann::detail::json_reverse_iterator; + + template + using output_adapter_t = ::nlohmann::detail::output_adapter_t; + + template + using binary_reader = ::nlohmann::detail::binary_reader; + template using binary_writer = ::nlohmann::detail::binary_writer; + + JSON_PRIVATE_UNLESS_TESTED: + using serializer = ::nlohmann::detail::serializer; + + public: + using value_t = detail::value_t; + /// JSON Pointer, see @ref nlohmann::json_pointer + using json_pointer = ::nlohmann::json_pointer; + template + using json_serializer = JSONSerializer; + /// how to treat decoding errors + using error_handler_t = detail::error_handler_t; + /// how to treat CBOR tags + using cbor_tag_handler_t = detail::cbor_tag_handler_t; + /// helper type for initializer lists of basic_json values + using initializer_list_t = std::initializer_list>; + + using input_format_t = detail::input_format_t; + /// SAX interface type, see @ref nlohmann::json_sax + using json_sax_t = json_sax; + + //////////////// + // exceptions // + //////////////// + + /// @name exceptions + /// Classes to implement user-defined exceptions. 
+ /// @{ + + /// @copydoc detail::exception + using exception = detail::exception; + /// @copydoc detail::parse_error + using parse_error = detail::parse_error; + /// @copydoc detail::invalid_iterator + using invalid_iterator = detail::invalid_iterator; + /// @copydoc detail::type_error + using type_error = detail::type_error; + /// @copydoc detail::out_of_range + using out_of_range = detail::out_of_range; + /// @copydoc detail::other_error + using other_error = detail::other_error; + + /// @} + + + ///////////////////// + // container types // + ///////////////////// + + /// @name container types + /// The canonic container types to use @ref basic_json like any other STL + /// container. + /// @{ + + /// the type of elements in a basic_json container + using value_type = basic_json; + + /// the type of an element reference + using reference = value_type&; + /// the type of an element const reference + using const_reference = const value_type&; + + /// a type to represent differences between iterators + using difference_type = std::ptrdiff_t; + /// a type to represent container sizes + using size_type = std::size_t; + + /// the allocator type + using allocator_type = AllocatorType; + + /// the type of an element pointer + using pointer = typename std::allocator_traits::pointer; + /// the type of an element const pointer + using const_pointer = typename std::allocator_traits::const_pointer; + + /// an iterator for a basic_json container + using iterator = iter_impl; + /// a const iterator for a basic_json container + using const_iterator = iter_impl; + /// a reverse iterator for a basic_json container + using reverse_iterator = json_reverse_iterator; + /// a const reverse iterator for a basic_json container + using const_reverse_iterator = json_reverse_iterator; + + /// @} + + + /*! + @brief returns the allocator associated with the container + */ + static allocator_type get_allocator() + { + return allocator_type(); + } + + /*! 
+ @brief returns version information on the library + + This function returns a JSON object with information about the library, + including the version number and information on the platform and compiler. + + @return JSON object holding version information + key | description + ----------- | --------------- + `compiler` | Information on the used compiler. It is an object with the following keys: `c++` (the used C++ standard), `family` (the compiler family; possible values are `clang`, `icc`, `gcc`, `ilecpp`, `msvc`, `pgcpp`, `sunpro`, and `unknown`), and `version` (the compiler version). + `copyright` | The copyright line for the library as string. + `name` | The name of the library as string. + `platform` | The used platform as string. Possible values are `win32`, `linux`, `apple`, `unix`, and `unknown`. + `url` | The URL of the project as string. + `version` | The version of the library. It is an object with the following keys: `major`, `minor`, and `patch` as defined by [Semantic Versioning](http://semver.org), and `string` (the version string). + + @liveexample{The following code shows an example output of the `meta()` + function.,meta} + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @complexity Constant. + + @since 2.1.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json meta() + { + basic_json result; + + result["copyright"] = "(C) 2013-2021 Niels Lohmann"; + result["name"] = "JSON for Modern C++"; + result["url"] = "https://github.com/nlohmann/json"; + result["version"]["string"] = + std::to_string(NLOHMANN_JSON_VERSION_MAJOR) + "." + + std::to_string(NLOHMANN_JSON_VERSION_MINOR) + "." 
+ + std::to_string(NLOHMANN_JSON_VERSION_PATCH); + result["version"]["major"] = NLOHMANN_JSON_VERSION_MAJOR; + result["version"]["minor"] = NLOHMANN_JSON_VERSION_MINOR; + result["version"]["patch"] = NLOHMANN_JSON_VERSION_PATCH; + +#ifdef _WIN32 + result["platform"] = "win32"; +#elif defined __linux__ + result["platform"] = "linux"; +#elif defined __APPLE__ + result["platform"] = "apple"; +#elif defined __unix__ + result["platform"] = "unix"; +#else + result["platform"] = "unknown"; +#endif + +#if defined(__ICC) || defined(__INTEL_COMPILER) + result["compiler"] = {{"family", "icc"}, {"version", __INTEL_COMPILER}}; +#elif defined(__clang__) + result["compiler"] = {{"family", "clang"}, {"version", __clang_version__}}; +#elif defined(__GNUC__) || defined(__GNUG__) + result["compiler"] = {{"family", "gcc"}, {"version", std::to_string(__GNUC__) + "." + std::to_string(__GNUC_MINOR__) + "." + std::to_string(__GNUC_PATCHLEVEL__)}}; +#elif defined(__HP_cc) || defined(__HP_aCC) + result["compiler"] = "hp" +#elif defined(__IBMCPP__) + result["compiler"] = {{"family", "ilecpp"}, {"version", __IBMCPP__}}; +#elif defined(_MSC_VER) + result["compiler"] = {{"family", "msvc"}, {"version", _MSC_VER}}; +#elif defined(__PGI) + result["compiler"] = {{"family", "pgcpp"}, {"version", __PGI}}; +#elif defined(__SUNPRO_CC) + result["compiler"] = {{"family", "sunpro"}, {"version", __SUNPRO_CC}}; +#else + result["compiler"] = {{"family", "unknown"}, {"version", "unknown"}}; +#endif + +#ifdef __cplusplus + result["compiler"]["c++"] = std::to_string(__cplusplus); +#else + result["compiler"]["c++"] = "unknown"; +#endif + return result; + } + + + /////////////////////////// + // JSON value data types // + /////////////////////////// + + /// @name JSON value data types + /// The data types to store a JSON value. These types are derived from + /// the template arguments passed to class @ref basic_json. 
+ /// @{ + +#if defined(JSON_HAS_CPP_14) + // Use transparent comparator if possible, combined with perfect forwarding + // on find() and count() calls prevents unnecessary string construction. + using object_comparator_t = std::less<>; +#else + using object_comparator_t = std::less; +#endif + + /*! + @brief a type for an object + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON objects as follows: + > An object is an unordered collection of zero or more name/value pairs, + > where a name is a string and a value is a string, number, boolean, null, + > object, or array. + + To store objects in C++, a type is defined by the template parameters + described below. + + @tparam ObjectType the container to store objects (e.g., `std::map` or + `std::unordered_map`) + @tparam StringType the type of the keys or names (e.g., `std::string`). + The comparison function `std::less` is used to order elements + inside the container. + @tparam AllocatorType the allocator to use for objects (e.g., + `std::allocator`) + + #### Default type + + With the default values for @a ObjectType (`std::map`), @a StringType + (`std::string`), and @a AllocatorType (`std::allocator`), the default + value for @a object_t is: + + @code {.cpp} + std::map< + std::string, // key_type + basic_json, // value_type + std::less, // key_compare + std::allocator> // allocator_type + > + @endcode + + #### Behavior + + The choice of @a object_t influences the behavior of the JSON class. With + the default type, objects have the following behavior: + + - When all names are unique, objects will be interoperable in the sense + that all software implementations receiving that object will agree on + the name-value mappings. + - When the names within an object are not unique, it is unspecified which + one of the values for a given key will be chosen. For instance, + `{"key": 2, "key": 1}` could be equal to either `{"key": 1}` or + `{"key": 2}`. 
+ - Internally, name/value pairs are stored in lexicographical order of the + names. Objects will also be serialized (see @ref dump) in this order. + For instance, `{"b": 1, "a": 2}` and `{"a": 2, "b": 1}` will be stored + and serialized as `{"a": 2, "b": 1}`. + - When comparing objects, the order of the name/value pairs is irrelevant. + This makes objects interoperable in the sense that they will not be + affected by these differences. For instance, `{"b": 1, "a": 2}` and + `{"a": 2, "b": 1}` will be treated as equal. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the maximum depth of nesting. + + In this class, the object's limit of nesting is not explicitly constrained. + However, a maximum depth of nesting may be introduced by the compiler or + runtime environment. A theoretical limit can be queried by calling the + @ref max_size function of a JSON object. + + #### Storage + + Objects are stored as pointers in a @ref basic_json type. That is, for any + access to object values, a pointer of type `object_t*` must be + dereferenced. + + @sa @ref array_t -- type for an array value + + @since version 1.0.0 + + @note The order name/value pairs are added to the object is *not* + preserved by the library. Therefore, iterating an object may return + name/value pairs in a different order than they were originally stored. In + fact, keys will be traversed in alphabetical order as `std::map` with + `std::less` is used by default. Please note this behavior conforms to [RFC + 7159](http://rfc7159.net/rfc7159), because any order implements the + specified "unordered" nature of JSON objects. + */ + using object_t = ObjectType>>; + + /*! + @brief a type for an array + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON arrays as follows: + > An array is an ordered sequence of zero or more values. + + To store objects in C++, a type is defined by the template parameters + explained below. 
+ + @tparam ArrayType container type to store arrays (e.g., `std::vector` or + `std::list`) + @tparam AllocatorType allocator to use for arrays (e.g., `std::allocator`) + + #### Default type + + With the default values for @a ArrayType (`std::vector`) and @a + AllocatorType (`std::allocator`), the default value for @a array_t is: + + @code {.cpp} + std::vector< + basic_json, // value_type + std::allocator // allocator_type + > + @endcode + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the maximum depth of nesting. + + In this class, the array's limit of nesting is not explicitly constrained. + However, a maximum depth of nesting may be introduced by the compiler or + runtime environment. A theoretical limit can be queried by calling the + @ref max_size function of a JSON array. + + #### Storage + + Arrays are stored as pointers in a @ref basic_json type. That is, for any + access to array values, a pointer of type `array_t*` must be dereferenced. + + @sa @ref object_t -- type for an object value + + @since version 1.0.0 + */ + using array_t = ArrayType>; + + /*! + @brief a type for a string + + [RFC 7159](http://rfc7159.net/rfc7159) describes JSON strings as follows: + > A string is a sequence of zero or more Unicode characters. + + To store objects in C++, a type is defined by the template parameter + described below. Unicode values are split by the JSON class into + byte-sized characters during deserialization. + + @tparam StringType the container to store strings (e.g., `std::string`). + Note this container is used for keys/names in objects, see @ref object_t. + + #### Default type + + With the default values for @a StringType (`std::string`), the default + value for @a string_t is: + + @code {.cpp} + std::string + @endcode + + #### Encoding + + Strings are stored in UTF-8 encoding. 
Therefore, functions like + `std::string::size()` or `std::string::length()` return the number of + bytes in the string rather than the number of characters or glyphs. + + #### String comparison + + [RFC 7159](http://rfc7159.net/rfc7159) states: + > Software implementations are typically required to test names of object + > members for equality. Implementations that transform the textual + > representation into sequences of Unicode code units and then perform the + > comparison numerically, code unit by code unit, are interoperable in the + > sense that implementations will agree in all cases on equality or + > inequality of two strings. For example, implementations that compare + > strings with escaped characters unconverted may incorrectly find that + > `"a\\b"` and `"a\u005Cb"` are not equal. + + This implementation is interoperable as it does compare strings code unit + by code unit. + + #### Storage + + String values are stored as pointers in a @ref basic_json type. That is, + for any access to string values, a pointer of type `string_t*` must be + dereferenced. + + @since version 1.0.0 + */ + using string_t = StringType; + + /*! + @brief a type for a boolean + + [RFC 7159](http://rfc7159.net/rfc7159) implicitly describes a boolean as a + type which differentiates the two literals `true` and `false`. + + To store objects in C++, a type is defined by the template parameter @a + BooleanType which chooses the type to use. + + #### Default type + + With the default values for @a BooleanType (`bool`), the default value for + @a boolean_t is: + + @code {.cpp} + bool + @endcode + + #### Storage + + Boolean values are stored directly inside a @ref basic_json type. + + @since version 1.0.0 + */ + using boolean_t = BooleanType; + + /*! + @brief a type for a number (integer) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. 
A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. + However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store integer numbers in C++, a type is defined by the template + parameter @a NumberIntegerType which chooses the type to use. + + #### Default type + + With the default values for @a NumberIntegerType (`int64_t`), the default + value for @a number_integer_t is: + + @code {.cpp} + int64_t + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in integer literals lead to an interpretation as octal + number. Internally, the value will be stored as decimal number. For + instance, the C++ integer literal `010` will be serialized to `8`. + During deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the range and precision of numbers. + + When the default type is used, the maximal integer number that can be + stored is `9223372036854775807` (INT64_MAX) and the minimal integer number + that can be stored is `-9223372036854775808` (INT64_MIN). Integer numbers + that are out of range will yield over/underflow when used in a + constructor. 
During deserialization, too large or small integer numbers + will be automatically be stored as @ref number_unsigned_t or @ref + number_float_t. + + [RFC 7159](http://rfc7159.net/rfc7159) further states: + > Note that when such software is used, numbers that are integers and are + > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense + > that implementations will agree exactly on their numeric values. + + As this range is a subrange of the exactly supported range [INT64_MIN, + INT64_MAX], this class's integer type is interoperable. + + #### Storage + + Integer number values are stored directly inside a @ref basic_json type. + + @sa @ref number_float_t -- type for number values (floating-point) + + @sa @ref number_unsigned_t -- type for number values (unsigned integer) + + @since version 1.0.0 + */ + using number_integer_t = NumberIntegerType; + + /*! + @brief a type for a number (unsigned) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. + However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store unsigned integer numbers in C++, a type is defined by the + template parameter @a NumberUnsignedType which chooses the type to use. 
+ + #### Default type + + With the default values for @a NumberUnsignedType (`uint64_t`), the + default value for @a number_unsigned_t is: + + @code {.cpp} + uint64_t + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in integer literals lead to an interpretation as octal + number. Internally, the value will be stored as decimal number. For + instance, the C++ integer literal `010` will be serialized to `8`. + During deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) specifies: + > An implementation may set limits on the range and precision of numbers. + + When the default type is used, the maximal integer number that can be + stored is `18446744073709551615` (UINT64_MAX) and the minimal integer + number that can be stored is `0`. Integer numbers that are out of range + will yield over/underflow when used in a constructor. During + deserialization, too large or small integer numbers will be automatically + be stored as @ref number_integer_t or @ref number_float_t. + + [RFC 7159](http://rfc7159.net/rfc7159) further states: + > Note that when such software is used, numbers that are integers and are + > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense + > that implementations will agree exactly on their numeric values. + + As this range is a subrange (when considered in conjunction with the + number_integer_t type) of the exactly supported range [0, UINT64_MAX], + this class's integer type is interoperable. + + #### Storage + + Integer number values are stored directly inside a @ref basic_json type. + + @sa @ref number_float_t -- type for number values (floating-point) + @sa @ref number_integer_t -- type for number values (integer) + + @since version 2.0.0 + */ + using number_unsigned_t = NumberUnsignedType; + + /*! 
+ @brief a type for a number (floating-point) + + [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: + > The representation of numbers is similar to that used in most + > programming languages. A number is represented in base 10 using decimal + > digits. It contains an integer component that may be prefixed with an + > optional minus sign, which may be followed by a fraction part and/or an + > exponent part. Leading zeros are not allowed. (...) Numeric values that + > cannot be represented in the grammar below (such as Infinity and NaN) + > are not permitted. + + This description includes both integer and floating-point numbers. + However, C++ allows more precise storage if it is known whether the number + is a signed integer, an unsigned integer or a floating-point number. + Therefore, three different types, @ref number_integer_t, @ref + number_unsigned_t and @ref number_float_t are used. + + To store floating-point numbers in C++, a type is defined by the template + parameter @a NumberFloatType which chooses the type to use. + + #### Default type + + With the default values for @a NumberFloatType (`double`), the default + value for @a number_float_t is: + + @code {.cpp} + double + @endcode + + #### Default behavior + + - The restrictions about leading zeros is not enforced in C++. Instead, + leading zeros in floating-point literals will be ignored. Internally, + the value will be stored as decimal number. For instance, the C++ + floating-point literal `01.2` will be serialized to `1.2`. During + deserialization, leading zeros yield an error. + - Not-a-number (NaN) values will be serialized to `null`. + + #### Limits + + [RFC 7159](http://rfc7159.net/rfc7159) states: + > This specification allows implementations to set limits on the range and + > precision of numbers accepted. 
Since software that implements IEEE + > 754-2008 binary64 (double precision) numbers is generally available and + > widely used, good interoperability can be achieved by implementations + > that expect no more precision or range than these provide, in the sense + > that implementations will approximate JSON numbers within the expected + > precision. + + This implementation does exactly follow this approach, as it uses double + precision floating-point numbers. Note values smaller than + `-1.79769313486232e+308` and values greater than `1.79769313486232e+308` + will be stored as NaN internally and be serialized to `null`. + + #### Storage + + Floating-point number values are stored directly inside a @ref basic_json + type. + + @sa @ref number_integer_t -- type for number values (integer) + + @sa @ref number_unsigned_t -- type for number values (unsigned integer) + + @since version 1.0.0 + */ + using number_float_t = NumberFloatType; + + /*! + @brief a type for a packed binary type + + This type is a type designed to carry binary data that appears in various + serialized formats, such as CBOR's Major Type 2, MessagePack's bin, and + BSON's generic binary subtype. This type is NOT a part of standard JSON and + exists solely for compatibility with these binary types. As such, it is + simply defined as an ordered sequence of zero or more byte values. + + Additionally, as an implementation detail, the subtype of the binary data is + carried around as a `std::uint8_t`, which is compatible with both of the + binary data formats that use binary subtyping, (though the specific + numbering is incompatible with each other, and it is up to the user to + translate between them). + + [CBOR's RFC 7049](https://tools.ietf.org/html/rfc7049) describes this type + as: + > Major type 2: a byte string. The string's length in bytes is represented + > following the rules for positive integers (major type 0). 
+ + [MessagePack's documentation on the bin type + family](https://github.com/msgpack/msgpack/blob/master/spec.md#bin-format-family) + describes this type as: + > Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes + > in addition to the size of the byte array. + + [BSON's specifications](http://bsonspec.org/spec.html) describe several + binary types; however, this type is intended to represent the generic binary + type which has the description: + > Generic binary subtype - This is the most commonly used binary subtype and + > should be the 'default' for drivers and tools. + + None of these impose any limitations on the internal representation other + than the basic unit of storage be some type of array whose parts are + decomposable into bytes. + + The default representation of this binary format is a + `std::vector`, which is a very common way to represent a byte + array in modern C++. + + #### Default type + + The default values for @a BinaryType is `std::vector` + + #### Storage + + Binary Arrays are stored as pointers in a @ref basic_json type. That is, + for any access to array values, a pointer of the type `binary_t*` must be + dereferenced. + + #### Notes on subtypes + + - CBOR + - Binary values are represented as byte strings. No subtypes are + supported and will be ignored when CBOR is written. + - MessagePack + - If a subtype is given and the binary array contains exactly 1, 2, 4, 8, + or 16 elements, the fixext family (fixext1, fixext2, fixext4, fixext8) + is used. For other sizes, the ext family (ext8, ext16, ext32) is used. + The subtype is then added as singed 8-bit integer. + - If no subtype is given, the bin family (bin8, bin16, bin32) is used. + - BSON + - If a subtype is given, it is used and added as unsigned 8-bit integer. + - If no subtype is given, the generic binary subtype 0x00 is used. 
+ + @sa @ref binary -- create a binary array + + @since version 3.8.0 + */ + using binary_t = nlohmann::byte_container_with_subtype; + /// @} + + private: + + /// helper for exception-safe object creation + template + JSON_HEDLEY_RETURNS_NON_NULL + static T* create(Args&& ... args) + { + AllocatorType alloc; + using AllocatorTraits = std::allocator_traits>; + + auto deleter = [&](T * obj) + { + AllocatorTraits::deallocate(alloc, obj, 1); + }; + std::unique_ptr obj(AllocatorTraits::allocate(alloc, 1), deleter); + AllocatorTraits::construct(alloc, obj.get(), std::forward(args)...); + JSON_ASSERT(obj != nullptr); + return obj.release(); + } + + //////////////////////// + // JSON value storage // + //////////////////////// + + JSON_PRIVATE_UNLESS_TESTED: + /*! + @brief a JSON value + + The actual storage for a JSON value of the @ref basic_json class. This + union combines the different storage types for the JSON value types + defined in @ref value_t. + + JSON type | value_t type | used type + --------- | --------------- | ------------------------ + object | object | pointer to @ref object_t + array | array | pointer to @ref array_t + string | string | pointer to @ref string_t + boolean | boolean | @ref boolean_t + number | number_integer | @ref number_integer_t + number | number_unsigned | @ref number_unsigned_t + number | number_float | @ref number_float_t + binary | binary | pointer to @ref binary_t + null | null | *no value is stored* + + @note Variable-length types (objects, arrays, and strings) are stored as + pointers. The size of the union should not exceed 64 bits if the default + value types are used. 
+ + @since version 1.0.0 + */ + union json_value + { + /// object (stored with pointer to save storage) + object_t* object; + /// array (stored with pointer to save storage) + array_t* array; + /// string (stored with pointer to save storage) + string_t* string; + /// binary (stored with pointer to save storage) + binary_t* binary; + /// boolean + boolean_t boolean; + /// number (integer) + number_integer_t number_integer; + /// number (unsigned integer) + number_unsigned_t number_unsigned; + /// number (floating-point) + number_float_t number_float; + + /// default constructor (for null values) + json_value() = default; + /// constructor for booleans + json_value(boolean_t v) noexcept : boolean(v) {} + /// constructor for numbers (integer) + json_value(number_integer_t v) noexcept : number_integer(v) {} + /// constructor for numbers (unsigned) + json_value(number_unsigned_t v) noexcept : number_unsigned(v) {} + /// constructor for numbers (floating-point) + json_value(number_float_t v) noexcept : number_float(v) {} + /// constructor for empty values of a given type + json_value(value_t t) + { + switch (t) + { + case value_t::object: + { + object = create(); + break; + } + + case value_t::array: + { + array = create(); + break; + } + + case value_t::string: + { + string = create(""); + break; + } + + case value_t::binary: + { + binary = create(); + break; + } + + case value_t::boolean: + { + boolean = boolean_t(false); + break; + } + + case value_t::number_integer: + { + number_integer = number_integer_t(0); + break; + } + + case value_t::number_unsigned: + { + number_unsigned = number_unsigned_t(0); + break; + } + + case value_t::number_float: + { + number_float = number_float_t(0.0); + break; + } + + case value_t::null: + { + object = nullptr; // silence warning, see #821 + break; + } + + default: + { + object = nullptr; // silence warning, see #821 + if (JSON_HEDLEY_UNLIKELY(t == value_t::null)) + { + JSON_THROW(other_error::create(500, 
"961c151d2e87f2686a955a9be24d316f1362bf21 3.9.1", basic_json())); // LCOV_EXCL_LINE + } + break; + } + } + } + + /// constructor for strings + json_value(const string_t& value) + { + string = create(value); + } + + /// constructor for rvalue strings + json_value(string_t&& value) + { + string = create(std::move(value)); + } + + /// constructor for objects + json_value(const object_t& value) + { + object = create(value); + } + + /// constructor for rvalue objects + json_value(object_t&& value) + { + object = create(std::move(value)); + } + + /// constructor for arrays + json_value(const array_t& value) + { + array = create(value); + } + + /// constructor for rvalue arrays + json_value(array_t&& value) + { + array = create(std::move(value)); + } + + /// constructor for binary arrays + json_value(const typename binary_t::container_type& value) + { + binary = create(value); + } + + /// constructor for rvalue binary arrays + json_value(typename binary_t::container_type&& value) + { + binary = create(std::move(value)); + } + + /// constructor for binary arrays (internal type) + json_value(const binary_t& value) + { + binary = create(value); + } + + /// constructor for rvalue binary arrays (internal type) + json_value(binary_t&& value) + { + binary = create(std::move(value)); + } + + void destroy(value_t t) noexcept + { + // flatten the current json_value to a heap-allocated stack + std::vector stack; + + // move the top-level items to stack + if (t == value_t::array) + { + stack.reserve(array->size()); + std::move(array->begin(), array->end(), std::back_inserter(stack)); + } + else if (t == value_t::object) + { + stack.reserve(object->size()); + for (auto&& it : *object) + { + stack.push_back(std::move(it.second)); + } + } + + while (!stack.empty()) + { + // move the last item to local variable to be processed + basic_json current_item(std::move(stack.back())); + stack.pop_back(); + + // if current_item is array/object, move + // its children to the stack to be processed 
later + if (current_item.is_array()) + { + std::move(current_item.m_value.array->begin(), current_item.m_value.array->end(), + std::back_inserter(stack)); + + current_item.m_value.array->clear(); + } + else if (current_item.is_object()) + { + for (auto&& it : *current_item.m_value.object) + { + stack.push_back(std::move(it.second)); + } + + current_item.m_value.object->clear(); + } + + // it's now safe that current_item get destructed + // since it doesn't have any children + } + + switch (t) + { + case value_t::object: + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, object); + std::allocator_traits::deallocate(alloc, object, 1); + break; + } + + case value_t::array: + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, array); + std::allocator_traits::deallocate(alloc, array, 1); + break; + } + + case value_t::string: + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, string); + std::allocator_traits::deallocate(alloc, string, 1); + break; + } + + case value_t::binary: + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, binary); + std::allocator_traits::deallocate(alloc, binary, 1); + break; + } + + default: + { + break; + } + } + } + }; + + private: + /*! + @brief checks the class invariants + + This function asserts the class invariants. It needs to be called at the + end of every constructor to make sure that created objects respect the + invariant. Furthermore, it has to be called each time the type of a JSON + value is changed, because the invariant expresses a relationship between + @a m_type and @a m_value. + + Furthermore, the parent relation is checked for arrays and objects: If + @a check_parents true and the value is an array or object, then the + container's elements must have the current value as parent. + + @param[in] check_parents whether the parent relation should be checked. 
+ The value is true by default and should only be set to false + during destruction of objects when the invariant does not + need to hold. + */ + void assert_invariant(bool check_parents = true) const noexcept + { + JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr); + JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr); + JSON_ASSERT(m_type != value_t::string || m_value.string != nullptr); + JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr); + +#if JSON_DIAGNOSTICS + JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j) + { + return j.m_parent == this; + })); +#else + static_cast(check_parents); +#endif + } + + void set_parents() + { +#if JSON_DIAGNOSTICS + switch (m_type) + { + case value_t::array: + { + for (auto& element : *m_value.array) + { + element.m_parent = this; + } + break; + } + + case value_t::object: + { + for (auto& element : *m_value.object) + { + element.second.m_parent = this; + } + break; + } + + default: + break; + } +#endif + } + + iterator set_parents(iterator it, typename iterator::difference_type count) + { +#if JSON_DIAGNOSTICS + for (typename iterator::difference_type i = 0; i < count; ++i) + { + (it + i)->m_parent = this; + } +#else + static_cast(count); +#endif + return it; + } + + reference set_parent(reference j) + { +#if JSON_DIAGNOSTICS + j.m_parent = this; +#else + static_cast(j); +#endif + return j; + } + + public: + ////////////////////////// + // JSON parser callback // + ////////////////////////// + + /*! 
+ @brief parser event types + + The parser callback distinguishes the following events: + - `object_start`: the parser read `{` and started to process a JSON object + - `key`: the parser read a key of a value in an object + - `object_end`: the parser read `}` and finished processing a JSON object + - `array_start`: the parser read `[` and started to process a JSON array + - `array_end`: the parser read `]` and finished processing a JSON array + - `value`: the parser finished reading a JSON value + + @image html callback_events.png "Example when certain parse events are triggered" + + @sa @ref parser_callback_t for more information and examples + */ + using parse_event_t = detail::parse_event_t; + + /*! + @brief per-element parser callback type + + With a parser callback function, the result of parsing a JSON text can be + influenced. When passed to @ref parse, it is called on certain events + (passed as @ref parse_event_t via parameter @a event) with a set recursion + depth @a depth and context JSON value @a parsed. The return value of the + callback function is a boolean indicating whether the element that emitted + the callback shall be kept or not. + + We distinguish six scenarios (determined by the event type) in which the + callback function can be called. The following table describes the values + of the parameters @a depth, @a event, and @a parsed. 
+ + parameter @a event | description | parameter @a depth | parameter @a parsed + ------------------ | ----------- | ------------------ | ------------------- + parse_event_t::object_start | the parser read `{` and started to process a JSON object | depth of the parent of the JSON object | a JSON value with type discarded + parse_event_t::key | the parser read a key of a value in an object | depth of the currently parsed JSON object | a JSON string containing the key + parse_event_t::object_end | the parser read `}` and finished processing a JSON object | depth of the parent of the JSON object | the parsed JSON object + parse_event_t::array_start | the parser read `[` and started to process a JSON array | depth of the parent of the JSON array | a JSON value with type discarded + parse_event_t::array_end | the parser read `]` and finished processing a JSON array | depth of the parent of the JSON array | the parsed JSON array + parse_event_t::value | the parser finished reading a JSON value | depth of the value | the parsed JSON value + + @image html callback_events.png "Example when certain parse events are triggered" + + Discarding a value (i.e., returning `false`) has different effects + depending on the context in which function was called: + + - Discarded values in structured types are skipped. That is, the parser + will behave as if the discarded value was never read. + - In case a value outside a structured type is skipped, it is replaced + with `null`. This case happens if the top-level element is skipped. + + @param[in] depth the depth of the recursion during parsing + + @param[in] event an event of type parse_event_t indicating the context in + the callback function has been called + + @param[in,out] parsed the current intermediate parse result; note that + writing to this value has no effect for parse_event_t::key events + + @return Whether the JSON value which called the function during parsing + should be kept (`true`) or not (`false`). 
In the latter case, it is either + skipped completely or replaced by an empty discarded object. + + @sa @ref parse for examples + + @since version 1.0.0 + */ + using parser_callback_t = detail::parser_callback_t; + + ////////////////// + // constructors // + ////////////////// + + /// @name constructors and destructors + /// Constructors of class @ref basic_json, copy/move constructor, copy + /// assignment, static functions creating objects, and the destructor. + /// @{ + + /*! + @brief create an empty value with a given type + + Create an empty JSON value with a given type. The value will be default + initialized with an empty value which depends on the type: + + Value type | initial value + ----------- | ------------- + null | `null` + boolean | `false` + string | `""` + number | `0` + object | `{}` + array | `[]` + binary | empty array + + @param[in] v the type of the value to create + + @complexity Constant. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows the constructor for different @ref + value_t values,basic_json__value_t} + + @sa @ref clear() -- restores the postcondition of this constructor + + @since version 1.0.0 + */ + basic_json(const value_t v) + : m_type(v), m_value(v) + { + assert_invariant(); + } + + /*! + @brief create a null object + + Create a `null` JSON value. It either takes a null pointer as parameter + (explicitly creating `null`) or no parameter (implicitly creating `null`). + The passed null pointer itself is not read -- it is only used to choose + the right constructor. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this constructor never throws + exceptions. 
+ + @liveexample{The following code shows the constructor with and without a + null pointer parameter.,basic_json__nullptr_t} + + @since version 1.0.0 + */ + basic_json(std::nullptr_t = nullptr) noexcept + : basic_json(value_t::null) + { + assert_invariant(); + } + + /*! + @brief create a JSON value + + This is a "catch all" constructor for all compatible JSON types; that is, + types for which a `to_json()` method exists. The constructor forwards the + parameter @a val to that method (to `json_serializer::to_json` method + with `U = uncvref_t`, to be exact). + + Template type @a CompatibleType includes, but is not limited to, the + following types: + - **arrays**: @ref array_t and all kinds of compatible containers such as + `std::vector`, `std::deque`, `std::list`, `std::forward_list`, + `std::array`, `std::valarray`, `std::set`, `std::unordered_set`, + `std::multiset`, and `std::unordered_multiset` with a `value_type` from + which a @ref basic_json value can be constructed. + - **objects**: @ref object_t and all kinds of compatible associative + containers such as `std::map`, `std::unordered_map`, `std::multimap`, + and `std::unordered_multimap` with a `key_type` compatible to + @ref string_t and a `value_type` from which a @ref basic_json value can + be constructed. + - **strings**: @ref string_t, string literals, and all compatible string + containers can be used. + - **numbers**: @ref number_integer_t, @ref number_unsigned_t, + @ref number_float_t, and all convertible number types such as `int`, + `size_t`, `int64_t`, `float` or `double` can be used. + - **boolean**: @ref boolean_t / `bool` can be used. + - **binary**: @ref binary_t / `std::vector` may be used, + unfortunately because string literals cannot be distinguished from binary + character arrays by the C++ type system, all types compatible with `const + char*` will be directed to the string constructor instead. 
This is both + for backwards compatibility, and due to the fact that a binary type is not + a standard JSON type. + + See the examples below. + + @tparam CompatibleType a type such that: + - @a CompatibleType is not derived from `std::istream`, + - @a CompatibleType is not @ref basic_json (to avoid hijacking copy/move + constructors), + - @a CompatibleType is not a different @ref basic_json type (i.e. with different template arguments) + - @a CompatibleType is not a @ref basic_json nested type (e.g., + @ref json_pointer, @ref iterator, etc ...) + - @ref @ref json_serializer has a + `to_json(basic_json_t&, CompatibleType&&)` method + + @tparam U = `uncvref_t` + + @param[in] val the value to be forwarded to the respective constructor + + @complexity Usually linear in the size of the passed @a val, also + depending on the implementation of the called `to_json()` + method. + + @exceptionsafety Depends on the called constructor. For types directly + supported by the library (i.e., all types for which no `to_json()` function + was provided), strong guarantee holds: if an exception is thrown, there are + no changes to any JSON value. + + @liveexample{The following code shows the constructor with several + compatible types.,basic_json__CompatibleType} + + @since version 2.1.0 + */ + template < typename CompatibleType, + typename U = detail::uncvref_t, + detail::enable_if_t < + !detail::is_basic_json::value && detail::is_compatible_type::value, int > = 0 > + basic_json(CompatibleType && val) noexcept(noexcept( + JSONSerializer::to_json(std::declval(), + std::forward(val)))) + { + JSONSerializer::to_json(*this, std::forward(val)); + set_parents(); + assert_invariant(); + } + + /*! + @brief create a JSON value from an existing one + + This is a constructor for existing @ref basic_json types. + It does not hijack copy/move constructors, since the parameter has different + template arguments than the current ones. 
+ + The constructor tries to convert the internal @ref m_value of the parameter. + + @tparam BasicJsonType a type such that: + - @a BasicJsonType is a @ref basic_json type. + - @a BasicJsonType has different template arguments than @ref basic_json_t. + + @param[in] val the @ref basic_json value to be converted. + + @complexity Usually linear in the size of the passed @a val, also + depending on the implementation of the called `to_json()` + method. + + @exceptionsafety Depends on the called constructor. For types directly + supported by the library (i.e., all types for which no `to_json()` function + was provided), strong guarantee holds: if an exception is thrown, there are + no changes to any JSON value. + + @since version 3.2.0 + */ + template < typename BasicJsonType, + detail::enable_if_t < + detail::is_basic_json::value&& !std::is_same::value, int > = 0 > + basic_json(const BasicJsonType& val) + { + using other_boolean_t = typename BasicJsonType::boolean_t; + using other_number_float_t = typename BasicJsonType::number_float_t; + using other_number_integer_t = typename BasicJsonType::number_integer_t; + using other_number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using other_string_t = typename BasicJsonType::string_t; + using other_object_t = typename BasicJsonType::object_t; + using other_array_t = typename BasicJsonType::array_t; + using other_binary_t = typename BasicJsonType::binary_t; + + switch (val.type()) + { + case value_t::boolean: + JSONSerializer::to_json(*this, val.template get()); + break; + case value_t::number_float: + JSONSerializer::to_json(*this, val.template get()); + break; + case value_t::number_integer: + JSONSerializer::to_json(*this, val.template get()); + break; + case value_t::number_unsigned: + JSONSerializer::to_json(*this, val.template get()); + break; + case value_t::string: + JSONSerializer::to_json(*this, val.template get_ref()); + break; + case value_t::object: + JSONSerializer::to_json(*this, val.template 
get_ref()); + break; + case value_t::array: + JSONSerializer::to_json(*this, val.template get_ref()); + break; + case value_t::binary: + JSONSerializer::to_json(*this, val.template get_ref()); + break; + case value_t::null: + *this = nullptr; + break; + case value_t::discarded: + m_type = value_t::discarded; + break; + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + set_parents(); + assert_invariant(); + } + + /*! + @brief create a container (array or object) from an initializer list + + Creates a JSON value of type array or object from the passed initializer + list @a init. In case @a type_deduction is `true` (default), the type of + the JSON value to be created is deducted from the initializer list @a init + according to the following rules: + + 1. If the list is empty, an empty JSON object value `{}` is created. + 2. If the list consists of pairs whose first element is a string, a JSON + object value is created where the first elements of the pairs are + treated as keys and the second elements are as values. + 3. In all other cases, an array is created. + + The rules aim to create the best fit between a C++ initializer list and + JSON values. The rationale is as follows: + + 1. The empty initializer list is written as `{}` which is exactly an empty + JSON object. + 2. C++ has no way of describing mapped types other than to list a list of + pairs. As JSON requires that keys must be of type string, rule 2 is the + weakest constraint one can pose on initializer lists to interpret them + as an object. + 3. In all other cases, the initializer list could not be interpreted as + JSON object type, so interpreting it as JSON array type is safe. 
+ + With the rules described above, the following JSON values cannot be + expressed by an initializer list: + + - the empty array (`[]`): use @ref array(initializer_list_t) + with an empty initializer list in this case + - arrays whose elements satisfy rule 2: use @ref + array(initializer_list_t) with the same initializer list + in this case + + @note When used without parentheses around an empty initializer list, @ref + basic_json() is called instead of this function, yielding the JSON null + value. + + @param[in] init initializer list with JSON values + + @param[in] type_deduction internal parameter; when set to `true`, the type + of the JSON value is deducted from the initializer list @a init; when set + to `false`, the type provided via @a manual_type is forced. This mode is + used by the functions @ref array(initializer_list_t) and + @ref object(initializer_list_t). + + @param[in] manual_type internal parameter; when @a type_deduction is set + to `false`, the created JSON value will use the provided type (only @ref + value_t::array and @ref value_t::object are valid); when @a type_deduction + is set to `true`, this parameter has no effect + + @throw type_error.301 if @a type_deduction is `false`, @a manual_type is + `value_t::object`, but @a init contains an element which is not a pair + whose first element is a string. In this case, the constructor could not + create an object. If @a type_deduction would have be `true`, an array + would have been created. See @ref object(initializer_list_t) + for an example. + + @complexity Linear in the size of the initializer list @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. 
+ + @liveexample{The example below shows how JSON values are created from + initializer lists.,basic_json__list_init_t} + + @sa @ref array(initializer_list_t) -- create a JSON array + value from an initializer list + @sa @ref object(initializer_list_t) -- create a JSON object + value from an initializer list + + @since version 1.0.0 + */ + basic_json(initializer_list_t init, + bool type_deduction = true, + value_t manual_type = value_t::array) + { + // check if each element is an array with two elements whose first + // element is a string + bool is_an_object = std::all_of(init.begin(), init.end(), + [](const detail::json_ref& element_ref) + { + return element_ref->is_array() && element_ref->size() == 2 && (*element_ref)[0].is_string(); + }); + + // adjust type if type deduction is not wanted + if (!type_deduction) + { + // if array is wanted, do not create an object though possible + if (manual_type == value_t::array) + { + is_an_object = false; + } + + // if object is wanted but impossible, throw an exception + if (JSON_HEDLEY_UNLIKELY(manual_type == value_t::object && !is_an_object)) + { + JSON_THROW(type_error::create(301, "cannot create object from initializer list", basic_json())); + } + } + + if (is_an_object) + { + // the initializer list is a list of pairs -> create object + m_type = value_t::object; + m_value = value_t::object; + + for (auto& element_ref : init) + { + auto element = element_ref.moved_or_copied(); + m_value.object->emplace( + std::move(*((*element.m_value.array)[0].m_value.string)), + std::move((*element.m_value.array)[1])); + } + } + else + { + // the initializer list describes an array -> create array + m_type = value_t::array; + m_value.array = create(init.begin(), init.end()); + } + + set_parents(); + assert_invariant(); + } + + /*! + @brief explicitly create a binary array (without subtype) + + Creates a JSON binary array value from a given binary container. 
Binary + values are part of various binary formats, such as CBOR, MessagePack, and + BSON. This constructor is used to create a value for serialization to those + formats. + + @note Note, this function exists because of the difficulty in correctly + specifying the correct template overload in the standard value ctor, as both + JSON arrays and JSON binary arrays are backed with some form of a + `std::vector`. Because JSON binary arrays are a non-standard extension it + was decided that it would be best to prevent automatic initialization of a + binary array type, for backwards compatibility and so it does not happen on + accident. + + @param[in] init container containing bytes to use as binary type + + @return JSON binary array value + + @complexity Linear in the size of @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @since version 3.8.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json binary(const typename binary_t::container_type& init) + { + auto res = basic_json(); + res.m_type = value_t::binary; + res.m_value = init; + return res; + } + + /*! + @brief explicitly create a binary array (with subtype) + + Creates a JSON binary array value from a given binary container. Binary + values are part of various binary formats, such as CBOR, MessagePack, and + BSON. This constructor is used to create a value for serialization to those + formats. + + @note Note, this function exists because of the difficulty in correctly + specifying the correct template overload in the standard value ctor, as both + JSON arrays and JSON binary arrays are backed with some form of a + `std::vector`. Because JSON binary arrays are a non-standard extension it + was decided that it would be best to prevent automatic initialization of a + binary array type, for backwards compatibility and so it does not happen on + accident. 
+ + @param[in] init container containing bytes to use as binary type + @param[in] subtype subtype to use in MessagePack and BSON + + @return JSON binary array value + + @complexity Linear in the size of @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @since version 3.8.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json binary(const typename binary_t::container_type& init, std::uint8_t subtype) + { + auto res = basic_json(); + res.m_type = value_t::binary; + res.m_value = binary_t(init, subtype); + return res; + } + + /// @copydoc binary(const typename binary_t::container_type&) + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json binary(typename binary_t::container_type&& init) + { + auto res = basic_json(); + res.m_type = value_t::binary; + res.m_value = std::move(init); + return res; + } + + /// @copydoc binary(const typename binary_t::container_type&, std::uint8_t) + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json binary(typename binary_t::container_type&& init, std::uint8_t subtype) + { + auto res = basic_json(); + res.m_type = value_t::binary; + res.m_value = binary_t(std::move(init), subtype); + return res; + } + + /*! + @brief explicitly create an array from an initializer list + + Creates a JSON array value from a given initializer list. That is, given a + list of values `a, b, c`, creates the JSON value `[a, b, c]`. If the + initializer list is empty, the empty array `[]` is created. + + @note This function is only needed to express two edge cases that cannot + be realized with the initializer list constructor (@ref + basic_json(initializer_list_t, bool, value_t)). These cases + are: + 1. creating an array whose elements are all pairs whose first element is a + string -- in this case, the initializer list constructor would create an + object, taking the first elements as keys + 2. 
creating an empty array -- passing the empty initializer list to the + initializer list constructor yields an empty object + + @param[in] init initializer list with JSON values to create an array from + (optional) + + @return JSON array value + + @complexity Linear in the size of @a init. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows an example for the `array` + function.,array} + + @sa @ref basic_json(initializer_list_t, bool, value_t) -- + create a JSON value from an initializer list + @sa @ref object(initializer_list_t) -- create a JSON object + value from an initializer list + + @since version 1.0.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json array(initializer_list_t init = {}) + { + return basic_json(init, false, value_t::array); + } + + /*! + @brief explicitly create an object from an initializer list + + Creates a JSON object value from a given initializer list. The initializer + lists elements must be pairs, and their first elements must be strings. If + the initializer list is empty, the empty object `{}` is created. + + @note This function is only added for symmetry reasons. In contrast to the + related function @ref array(initializer_list_t), there are + no cases which can only be expressed by this function. That is, any + initializer list @a init can also be passed to the initializer list + constructor @ref basic_json(initializer_list_t, bool, value_t). + + @param[in] init initializer list to create an object from (optional) + + @return JSON object value + + @throw type_error.301 if @a init is not a list of pairs whose first + elements are strings. In this case, no object can be created. When such a + value is passed to @ref basic_json(initializer_list_t, bool, value_t), + an array would have been created from the passed initializer list @a init. + See example below. + + @complexity Linear in the size of @a init. 
+ + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows an example for the `object` + function.,object} + + @sa @ref basic_json(initializer_list_t, bool, value_t) -- + create a JSON value from an initializer list + @sa @ref array(initializer_list_t) -- create a JSON array + value from an initializer list + + @since version 1.0.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json object(initializer_list_t init = {}) + { + return basic_json(init, false, value_t::object); + } + + /*! + @brief construct an array with count copies of given value + + Constructs a JSON array value by creating @a cnt copies of a passed value. + In case @a cnt is `0`, an empty array is created. + + @param[in] cnt the number of JSON copies of @a val to create + @param[in] val the JSON value to copy + + @post `std::distance(begin(),end()) == cnt` holds. + + @complexity Linear in @a cnt. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @liveexample{The following code shows examples for the @ref + basic_json(size_type\, const basic_json&) + constructor.,basic_json__size_type_basic_json} + + @since version 1.0.0 + */ + basic_json(size_type cnt, const basic_json& val) + : m_type(value_t::array) + { + m_value.array = create(cnt, val); + set_parents(); + assert_invariant(); + } + + /*! + @brief construct a JSON container given an iterator range + + Constructs the JSON value with the contents of the range `[first, last)`. + The semantics depends on the different types a JSON value can have: + - In case of a null type, invalid_iterator.206 is thrown. + - In case of other primitive types (number, boolean, or string), @a first + must be `begin()` and @a last must be `end()`. In this case, the value is + copied. Otherwise, invalid_iterator.204 is thrown. 
+ - In case of structured types (array, object), the constructor behaves as + similar versions for `std::vector` or `std::map`; that is, a JSON array + or object is constructed from the values in the range. + + @tparam InputIT an input iterator type (@ref iterator or @ref + const_iterator) + + @param[in] first begin of the range to copy from (included) + @param[in] last end of the range to copy from (excluded) + + @pre Iterators @a first and @a last must be initialized. **This + precondition is enforced with an assertion (see warning).** If + assertions are switched off, a violation of this precondition yields + undefined behavior. + + @pre Range `[first, last)` is valid. Usually, this precondition cannot be + checked efficiently. Only certain edge cases are detected; see the + description of the exceptions below. A violation of this precondition + yields undefined behavior. + + @warning A precondition is enforced with a runtime assertion that will + result in calling `std::abort` if this precondition is not met. + Assertions can be disabled by defining `NDEBUG` at compile time. + See https://en.cppreference.com/w/cpp/error/assert for more + information. + + @throw invalid_iterator.201 if iterators @a first and @a last are not + compatible (i.e., do not belong to the same JSON value). In this case, + the range `[first, last)` is undefined. + @throw invalid_iterator.204 if iterators @a first and @a last belong to a + primitive type (number, boolean, or string), but @a first does not point + to the first element any more. In this case, the range `[first, last)` is + undefined. See example code below. + @throw invalid_iterator.206 if iterators @a first and @a last belong to a + null value. In this case, the range `[first, last)` is undefined. + + @complexity Linear in distance between @a first and @a last. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. 
+ + @liveexample{The example below shows several ways to create JSON values by + specifying a subrange with iterators.,basic_json__InputIt_InputIt} + + @since version 1.0.0 + */ + template < class InputIT, typename std::enable_if < + std::is_same::value || + std::is_same::value, int >::type = 0 > + basic_json(InputIT first, InputIT last) + { + JSON_ASSERT(first.m_object != nullptr); + JSON_ASSERT(last.m_object != nullptr); + + // make sure iterator fits the current value + if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(201, "iterators are not compatible", basic_json())); + } + + // copy type from first iterator + m_type = first.m_object->m_type; + + // check if iterator range is complete for primitive values + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + { + if (JSON_HEDLEY_UNLIKELY(!first.m_it.primitive_iterator.is_begin() + || !last.m_it.primitive_iterator.is_end())) + { + JSON_THROW(invalid_iterator::create(204, "iterators out of range", *first.m_object)); + } + break; + } + + default: + break; + } + + switch (m_type) + { + case value_t::number_integer: + { + m_value.number_integer = first.m_object->m_value.number_integer; + break; + } + + case value_t::number_unsigned: + { + m_value.number_unsigned = first.m_object->m_value.number_unsigned; + break; + } + + case value_t::number_float: + { + m_value.number_float = first.m_object->m_value.number_float; + break; + } + + case value_t::boolean: + { + m_value.boolean = first.m_object->m_value.boolean; + break; + } + + case value_t::string: + { + m_value = *first.m_object->m_value.string; + break; + } + + case value_t::object: + { + m_value.object = create(first.m_it.object_iterator, + last.m_it.object_iterator); + break; + } + + case value_t::array: + { + m_value.array = create(first.m_it.array_iterator, + last.m_it.array_iterator); + break; + } 
+ + case value_t::binary: + { + m_value = *first.m_object->m_value.binary; + break; + } + + default: + JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " + std::string(first.m_object->type_name()), *first.m_object)); + } + + set_parents(); + assert_invariant(); + } + + + /////////////////////////////////////// + // other constructors and destructor // + /////////////////////////////////////// + + template, + std::is_same>::value, int> = 0 > + basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {} + + /*! + @brief copy constructor + + Creates a copy of a given JSON value. + + @param[in] other the JSON value to copy + + @post `*this == other` + + @complexity Linear in the size of @a other. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes to any JSON value. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is linear. + - As postcondition, it holds: `other == basic_json(other)`. 
+ + @liveexample{The following code shows an example for the copy + constructor.,basic_json__basic_json} + + @since version 1.0.0 + */ + basic_json(const basic_json& other) + : m_type(other.m_type) + { + // check of passed value is valid + other.assert_invariant(); + + switch (m_type) + { + case value_t::object: + { + m_value = *other.m_value.object; + break; + } + + case value_t::array: + { + m_value = *other.m_value.array; + break; + } + + case value_t::string: + { + m_value = *other.m_value.string; + break; + } + + case value_t::boolean: + { + m_value = other.m_value.boolean; + break; + } + + case value_t::number_integer: + { + m_value = other.m_value.number_integer; + break; + } + + case value_t::number_unsigned: + { + m_value = other.m_value.number_unsigned; + break; + } + + case value_t::number_float: + { + m_value = other.m_value.number_float; + break; + } + + case value_t::binary: + { + m_value = *other.m_value.binary; + break; + } + + default: + break; + } + + set_parents(); + assert_invariant(); + } + + /*! + @brief move constructor + + Move constructor. Constructs a JSON value with the contents of the given + value @a other using move semantics. It "steals" the resources from @a + other and leaves it as JSON null value. + + @param[in,out] other value to move to this object + + @post `*this` has the same value as @a other before the call. + @post @a other is a JSON null value. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this constructor never throws + exceptions. + + @requirement This function helps `basic_json` satisfying the + [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible) + requirements. 
+ + @liveexample{The code below shows the move constructor explicitly called + via std::move.,basic_json__moveconstructor} + + @since version 1.0.0 + */ + basic_json(basic_json&& other) noexcept + : m_type(std::move(other.m_type)), + m_value(std::move(other.m_value)) + { + // check that passed value is valid + other.assert_invariant(false); + + // invalidate payload + other.m_type = value_t::null; + other.m_value = {}; + + set_parents(); + assert_invariant(); + } + + /*! + @brief copy assignment + + Copy assignment operator. Copies a JSON value via the "copy and swap" + strategy: It is expressed in terms of the copy constructor, destructor, + and the `swap()` member function. + + @param[in] other value to copy from + + @complexity Linear. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is linear. + + @liveexample{The code below shows and example for the copy assignment. It + creates a copy of value `a` which is then swapped with `b`. Finally\, the + copy of `a` (which is the null value after the swap) is + destroyed.,basic_json__copyassignment} + + @since version 1.0.0 + */ + basic_json& operator=(basic_json other) noexcept ( + std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_assignable::value + ) + { + // check that passed value is valid + other.assert_invariant(); + + using std::swap; + swap(m_type, other.m_type); + swap(m_value, other.m_value); + + set_parents(); + assert_invariant(); + return *this; + } + + /*! + @brief destructor + + Destroys the JSON value and frees all allocated memory. + + @complexity Linear. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is linear. 
+ - All stored elements are destroyed and all memory is freed. + + @since version 1.0.0 + */ + ~basic_json() noexcept + { + assert_invariant(false); + m_value.destroy(m_type); + } + + /// @} + + public: + /////////////////////// + // object inspection // + /////////////////////// + + /// @name object inspection + /// Functions to inspect the type of a JSON value. + /// @{ + + /*! + @brief serialization + + Serialization function for JSON values. The function tries to mimic + Python's `json.dumps()` function, and currently supports its @a indent + and @a ensure_ascii parameters. + + @param[in] indent If indent is nonnegative, then array elements and object + members will be pretty-printed with that indent level. An indent level of + `0` will only insert newlines. `-1` (the default) selects the most compact + representation. + @param[in] indent_char The character to use for indentation if @a indent is + greater than `0`. The default is ` ` (space). + @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters + in the output are escaped with `\uXXXX` sequences, and the result consists + of ASCII characters only. + @param[in] error_handler how to react on decoding errors; there are three + possible values: `strict` (throws and exception in case a decoding error + occurs; default), `replace` (replace invalid UTF-8 sequences with U+FFFD), + and `ignore` (ignore invalid UTF-8 sequences during serialization; all + bytes are copied to the output unchanged). + + @return string containing the serialization of the JSON value + + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded and @a error_handler is set to strict + + @note Binary values are serialized as object containing two keys: + - "bytes": an array of bytes as integers + - "subtype": the subtype as integer or "null" if the binary has no subtype + + @complexity Linear. 
+ + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @liveexample{The following example shows the effect of different @a indent\, + @a indent_char\, and @a ensure_ascii parameters to the result of the + serialization.,dump} + + @see https://docs.python.org/2/library/json.html#json.dump + + @since version 1.0.0; indentation character @a indent_char, option + @a ensure_ascii and exceptions added in version 3.0.0; error + handlers added in version 3.4.0; serialization of binary values added + in version 3.8.0. + */ + string_t dump(const int indent = -1, + const char indent_char = ' ', + const bool ensure_ascii = false, + const error_handler_t error_handler = error_handler_t::strict) const + { + string_t result; + serializer s(detail::output_adapter(result), indent_char, error_handler); + + if (indent >= 0) + { + s.dump(*this, true, ensure_ascii, static_cast(indent)); + } + else + { + s.dump(*this, false, ensure_ascii, 0); + } + + return result; + } + + /*! + @brief return the type of the JSON value (explicit) + + Return the type of the JSON value as a value from the @ref value_t + enumeration. + + @return the type of the JSON value + Value type | return value + ------------------------- | ------------------------- + null | value_t::null + boolean | value_t::boolean + string | value_t::string + number (integer) | value_t::number_integer + number (unsigned integer) | value_t::number_unsigned + number (floating-point) | value_t::number_float + object | value_t::object + array | value_t::array + binary | value_t::binary + discarded | value_t::discarded + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
+ + @liveexample{The following code exemplifies `type()` for all JSON + types.,type} + + @sa @ref operator value_t() -- return the type of the JSON value (implicit) + @sa @ref type_name() -- return the type as string + + @since version 1.0.0 + */ + constexpr value_t type() const noexcept + { + return m_type; + } + + /*! + @brief return whether type is primitive + + This function returns true if and only if the JSON type is primitive + (string, number, boolean, or null). + + @return `true` if type is primitive (string, number, boolean, or null), + `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_primitive()` for all JSON + types.,is_primitive} + + @sa @ref is_structured() -- returns whether JSON value is structured + @sa @ref is_null() -- returns whether JSON value is `null` + @sa @ref is_string() -- returns whether JSON value is a string + @sa @ref is_boolean() -- returns whether JSON value is a boolean + @sa @ref is_number() -- returns whether JSON value is a number + @sa @ref is_binary() -- returns whether JSON value is a binary array + + @since version 1.0.0 + */ + constexpr bool is_primitive() const noexcept + { + return is_null() || is_string() || is_boolean() || is_number() || is_binary(); + } + + /*! + @brief return whether type is structured + + This function returns true if and only if the JSON type is structured + (array or object). + + @return `true` if type is structured (array or object), `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
+ + @liveexample{The following code exemplifies `is_structured()` for all JSON + types.,is_structured} + + @sa @ref is_primitive() -- returns whether value is primitive + @sa @ref is_array() -- returns whether value is an array + @sa @ref is_object() -- returns whether value is an object + + @since version 1.0.0 + */ + constexpr bool is_structured() const noexcept + { + return is_array() || is_object(); + } + + /*! + @brief return whether value is null + + This function returns true if and only if the JSON value is null. + + @return `true` if type is null, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_null()` for all JSON + types.,is_null} + + @since version 1.0.0 + */ + constexpr bool is_null() const noexcept + { + return m_type == value_t::null; + } + + /*! + @brief return whether value is a boolean + + This function returns true if and only if the JSON value is a boolean. + + @return `true` if type is boolean, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_boolean()` for all JSON + types.,is_boolean} + + @since version 1.0.0 + */ + constexpr bool is_boolean() const noexcept + { + return m_type == value_t::boolean; + } + + /*! + @brief return whether value is a number + + This function returns true if and only if the JSON value is a number. This + includes both integer (signed and unsigned) and floating-point values. + + @return `true` if type is number (regardless whether integer, unsigned + integer or floating-type), `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
+ + @liveexample{The following code exemplifies `is_number()` for all JSON + types.,is_number} + + @sa @ref is_number_integer() -- check if value is an integer or unsigned + integer number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 1.0.0 + */ + constexpr bool is_number() const noexcept + { + return is_number_integer() || is_number_float(); + } + + /*! + @brief return whether value is an integer number + + This function returns true if and only if the JSON value is a signed or + unsigned integer number. This excludes floating-point values. + + @return `true` if type is an integer or unsigned integer number, `false` + otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_number_integer()` for all + JSON types.,is_number_integer} + + @sa @ref is_number() -- check if value is a number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 1.0.0 + */ + constexpr bool is_number_integer() const noexcept + { + return m_type == value_t::number_integer || m_type == value_t::number_unsigned; + } + + /*! + @brief return whether value is an unsigned integer number + + This function returns true if and only if the JSON value is an unsigned + integer number. This excludes floating-point and signed integer values. + + @return `true` if type is an unsigned integer number, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
+ + @liveexample{The following code exemplifies `is_number_unsigned()` for all + JSON types.,is_number_unsigned} + + @sa @ref is_number() -- check if value is a number + @sa @ref is_number_integer() -- check if value is an integer or unsigned + integer number + @sa @ref is_number_float() -- check if value is a floating-point number + + @since version 2.0.0 + */ + constexpr bool is_number_unsigned() const noexcept + { + return m_type == value_t::number_unsigned; + } + + /*! + @brief return whether value is a floating-point number + + This function returns true if and only if the JSON value is a + floating-point number. This excludes signed and unsigned integer values. + + @return `true` if type is a floating-point number, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_number_float()` for all + JSON types.,is_number_float} + + @sa @ref is_number() -- check if value is number + @sa @ref is_number_integer() -- check if value is an integer number + @sa @ref is_number_unsigned() -- check if value is an unsigned integer + number + + @since version 1.0.0 + */ + constexpr bool is_number_float() const noexcept + { + return m_type == value_t::number_float; + } + + /*! + @brief return whether value is an object + + This function returns true if and only if the JSON value is an object. + + @return `true` if type is object, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_object()` for all JSON + types.,is_object} + + @since version 1.0.0 + */ + constexpr bool is_object() const noexcept + { + return m_type == value_t::object; + } + + /*! + @brief return whether value is an array + + This function returns true if and only if the JSON value is an array. + + @return `true` if type is array, `false` otherwise. 
+ + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_array()` for all JSON + types.,is_array} + + @since version 1.0.0 + */ + constexpr bool is_array() const noexcept + { + return m_type == value_t::array; + } + + /*! + @brief return whether value is a string + + This function returns true if and only if the JSON value is a string. + + @return `true` if type is string, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_string()` for all JSON + types.,is_string} + + @since version 1.0.0 + */ + constexpr bool is_string() const noexcept + { + return m_type == value_t::string; + } + + /*! + @brief return whether value is a binary array + + This function returns true if and only if the JSON value is a binary array. + + @return `true` if type is binary array, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies `is_binary()` for all JSON + types.,is_binary} + + @since version 3.8.0 + */ + constexpr bool is_binary() const noexcept + { + return m_type == value_t::binary; + } + + /*! + @brief return whether value is discarded + + This function returns true if and only if the JSON value was discarded + during parsing with a callback function (see @ref parser_callback_t). + + @note This function will always be `false` for JSON values after parsing. + That is, discarded values can only occur during parsing, but will be + removed when inside a structured value or replaced by null in other cases. + + @return `true` if type is discarded, `false` otherwise. + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. 
+ + @liveexample{The following code exemplifies `is_discarded()` for all JSON + types.,is_discarded} + + @since version 1.0.0 + */ + constexpr bool is_discarded() const noexcept + { + return m_type == value_t::discarded; + } + + /*! + @brief return the type of the JSON value (implicit) + + Implicitly return the type of the JSON value as a value from the @ref + value_t enumeration. + + @return the type of the JSON value + + @complexity Constant. + + @exceptionsafety No-throw guarantee: this member function never throws + exceptions. + + @liveexample{The following code exemplifies the @ref value_t operator for + all JSON types.,operator__value_t} + + @sa @ref type() -- return the type of the JSON value (explicit) + @sa @ref type_name() -- return the type as string + + @since version 1.0.0 + */ + constexpr operator value_t() const noexcept + { + return m_type; + } + + /// @} + + private: + ////////////////// + // value access // + ////////////////// + + /// get a boolean (explicit) + boolean_t get_impl(boolean_t* /*unused*/) const + { + if (JSON_HEDLEY_LIKELY(is_boolean())) + { + return m_value.boolean; + } + + JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(type_name()), *this)); + } + + /// get a pointer to the value (object) + object_t* get_impl_ptr(object_t* /*unused*/) noexcept + { + return is_object() ? m_value.object : nullptr; + } + + /// get a pointer to the value (object) + constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const noexcept + { + return is_object() ? m_value.object : nullptr; + } + + /// get a pointer to the value (array) + array_t* get_impl_ptr(array_t* /*unused*/) noexcept + { + return is_array() ? m_value.array : nullptr; + } + + /// get a pointer to the value (array) + constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const noexcept + { + return is_array() ? 
m_value.array : nullptr; + } + + /// get a pointer to the value (string) + string_t* get_impl_ptr(string_t* /*unused*/) noexcept + { + return is_string() ? m_value.string : nullptr; + } + + /// get a pointer to the value (string) + constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const noexcept + { + return is_string() ? m_value.string : nullptr; + } + + /// get a pointer to the value (boolean) + boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept + { + return is_boolean() ? &m_value.boolean : nullptr; + } + + /// get a pointer to the value (boolean) + constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) const noexcept + { + return is_boolean() ? &m_value.boolean : nullptr; + } + + /// get a pointer to the value (integer number) + number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept + { + return is_number_integer() ? &m_value.number_integer : nullptr; + } + + /// get a pointer to the value (integer number) + constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept + { + return is_number_integer() ? &m_value.number_integer : nullptr; + } + + /// get a pointer to the value (unsigned number) + number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept + { + return is_number_unsigned() ? &m_value.number_unsigned : nullptr; + } + + /// get a pointer to the value (unsigned number) + constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t* /*unused*/) const noexcept + { + return is_number_unsigned() ? &m_value.number_unsigned : nullptr; + } + + /// get a pointer to the value (floating-point number) + number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept + { + return is_number_float() ? &m_value.number_float : nullptr; + } + + /// get a pointer to the value (floating-point number) + constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unused*/) const noexcept + { + return is_number_float() ? 
&m_value.number_float : nullptr; + } + + /// get a pointer to the value (binary) + binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept + { + return is_binary() ? m_value.binary : nullptr; + } + + /// get a pointer to the value (binary) + constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const noexcept + { + return is_binary() ? m_value.binary : nullptr; + } + + /*! + @brief helper function to implement get_ref() + + This function helps to implement get_ref() without code duplication for + const and non-const overloads + + @tparam ThisType will be deduced as `basic_json` or `const basic_json` + + @throw type_error.303 if ReferenceType does not match underlying value + type of the current JSON + */ + template + static ReferenceType get_ref_impl(ThisType& obj) + { + // delegate the call to get_ptr<>() + auto ptr = obj.template get_ptr::type>(); + + if (JSON_HEDLEY_LIKELY(ptr != nullptr)) + { + return *ptr; + } + + JSON_THROW(type_error::create(303, "incompatible ReferenceType for get_ref, actual type is " + std::string(obj.type_name()), obj)); + } + + public: + /// @name value access + /// Direct access to the stored value of a JSON value. + /// @{ + + /*! + @brief get special-case overload + + This overloads avoids a lot of template boilerplate, it can be seen as the + identity method + + @tparam BasicJsonType == @ref basic_json + + @return a copy of *this + + @complexity Constant. + + @since version 2.1.0 + */ + template::type, basic_json_t>::value, + int> = 0> + basic_json get() const + { + return *this; + } + + /*! + @brief get special-case overload + + This overloads converts the current @ref basic_json in a different + @ref basic_json type + + @tparam BasicJsonType == @ref basic_json + + @return a copy of *this, converted into @tparam BasicJsonType + + @complexity Depending on the implementation of the called `from_json()` + method. 
+ + @since version 3.2.0 + */ + template < typename BasicJsonType, detail::enable_if_t < + !std::is_same::value&& + detail::is_basic_json::value, int > = 0 > + BasicJsonType get() const + { + return *this; + } + + /*! + @brief get a value (explicit) + + Explicit type conversion between the JSON value and a compatible value + which is [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible) + and [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). + The value is converted by calling the @ref json_serializer + `from_json()` method. + + The function is equivalent to executing + @code {.cpp} + ValueType ret; + JSONSerializer::from_json(*this, ret); + return ret; + @endcode + + This overloads is chosen if: + - @a ValueType is not @ref basic_json, + - @ref json_serializer has a `from_json()` method of the form + `void from_json(const basic_json&, ValueType&)`, and + - @ref json_serializer does not have a `from_json()` method of + the form `ValueType from_json(const basic_json&)` + + @tparam ValueTypeCV the provided value type + @tparam ValueType the returned value type + + @return copy of the JSON value, converted to @a ValueType + + @throw what @ref json_serializer `from_json()` method throws + + @liveexample{The example below shows several conversions from JSON values + to other types. 
There a few things to note: (1) Floating-point numbers can + be converted to integers\, (2) A JSON array can be converted to a standard + `std::vector`\, (3) A JSON object can be converted to C++ + associative containers such as `std::unordered_map`.,get__ValueType_const} + + @since version 2.1.0 + */ + template < typename ValueTypeCV, typename ValueType = detail::uncvref_t, + detail::enable_if_t < + !detail::is_basic_json::value && + detail::has_from_json::value && + !detail::has_non_default_from_json::value, + int > = 0 > + ValueType get() const noexcept(noexcept( + JSONSerializer::from_json(std::declval(), std::declval()))) + { + // we cannot static_assert on ValueTypeCV being non-const, because + // there is support for get(), which is why we + // still need the uncvref + static_assert(!std::is_reference::value, + "get() cannot be used with reference types, you might want to use get_ref()"); + static_assert(std::is_default_constructible::value, + "types must be DefaultConstructible when used with get()"); + + ValueType ret; + JSONSerializer::from_json(*this, ret); + return ret; + } + + /*! + @brief get a value (explicit); special case + + Explicit type conversion between the JSON value and a compatible value + which is **not** [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible) + and **not** [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). + The value is converted by calling the @ref json_serializer + `from_json()` method. + + The function is equivalent to executing + @code {.cpp} + return JSONSerializer::from_json(*this); + @endcode + + This overloads is chosen if: + - @a ValueType is not @ref basic_json and + - @ref json_serializer has a `from_json()` method of the form + `ValueType from_json(const basic_json&)` + + @note If @ref json_serializer has both overloads of + `from_json()`, this one is chosen. 
+ + @tparam ValueTypeCV the provided value type + @tparam ValueType the returned value type + + @return copy of the JSON value, converted to @a ValueType + + @throw what @ref json_serializer `from_json()` method throws + + @since version 2.1.0 + */ + template < typename ValueTypeCV, typename ValueType = detail::uncvref_t, + detail::enable_if_t < !std::is_same::value && + detail::has_non_default_from_json::value, + int > = 0 > + ValueType get() const noexcept(noexcept( + JSONSerializer::from_json(std::declval()))) + { + static_assert(!std::is_reference::value, + "get() cannot be used with reference types, you might want to use get_ref()"); + return JSONSerializer::from_json(*this); + } + + /*! + @brief get a value (explicit) + + Explicit type conversion between the JSON value and a compatible value. + The value is filled into the input parameter by calling the @ref json_serializer + `from_json()` method. + + The function is equivalent to executing + @code {.cpp} + ValueType v; + JSONSerializer::from_json(*this, v); + @endcode + + This overloads is chosen if: + - @a ValueType is not @ref basic_json, + - @ref json_serializer has a `from_json()` method of the form + `void from_json(const basic_json&, ValueType&)`, and + + @tparam ValueType the input parameter type. + + @return the input parameter, allowing chaining calls. + + @throw what @ref json_serializer `from_json()` method throws + + @liveexample{The example below shows several conversions from JSON values + to other types. 
There a few things to note: (1) Floating-point numbers can + be converted to integers\, (2) A JSON array can be converted to a standard + `std::vector`\, (3) A JSON object can be converted to C++ + associative containers such as `std::unordered_map`.,get_to} + + @since version 3.3.0 + */ + template < typename ValueType, + detail::enable_if_t < + !detail::is_basic_json::value&& + detail::has_from_json::value, + int > = 0 > + ValueType & get_to(ValueType& v) const noexcept(noexcept( + JSONSerializer::from_json(std::declval(), v))) + { + JSONSerializer::from_json(*this, v); + return v; + } + + // specialization to allow to call get_to with a basic_json value + // see https://github.com/nlohmann/json/issues/2175 + template::value, + int> = 0> + ValueType & get_to(ValueType& v) const + { + v = *this; + return v; + } + + template < + typename T, std::size_t N, + typename Array = T (&)[N], + detail::enable_if_t < + detail::has_from_json::value, int > = 0 > + Array get_to(T (&v)[N]) const + noexcept(noexcept(JSONSerializer::from_json( + std::declval(), v))) + { + JSONSerializer::from_json(*this, v); + return v; + } + + + /*! + @brief get a pointer value (implicit) + + Implicit pointer access to the internally stored JSON value. No copies are + made. + + @warning Writing data to the pointee of the result yields an undefined + state. + + @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref + object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, + @ref number_unsigned_t, or @ref number_float_t. Enforced by a static + assertion. + + @return pointer to the internally stored JSON value if the requested + pointer type @a PointerType fits to the JSON value; `nullptr` otherwise + + @complexity Constant. + + @liveexample{The example below shows how pointers to internal values of a + JSON value can be requested. 
Note that no type conversions are made and a + `nullptr` is returned if the value and the requested pointer type does not + match.,get_ptr} + + @since version 1.0.0 + */ + template::value, int>::type = 0> + auto get_ptr() noexcept -> decltype(std::declval().get_impl_ptr(std::declval())) + { + // delegate the call to get_impl_ptr<>() + return get_impl_ptr(static_cast(nullptr)); + } + + /*! + @brief get a pointer value (implicit) + @copydoc get_ptr() + */ + template < typename PointerType, typename std::enable_if < + std::is_pointer::value&& + std::is_const::type>::value, int >::type = 0 > + constexpr auto get_ptr() const noexcept -> decltype(std::declval().get_impl_ptr(std::declval())) + { + // delegate the call to get_impl_ptr<>() const + return get_impl_ptr(static_cast(nullptr)); + } + + /*! + @brief get a pointer value (explicit) + + Explicit pointer access to the internally stored JSON value. No copies are + made. + + @warning The pointer becomes invalid if the underlying JSON object + changes. + + @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref + object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, + @ref number_unsigned_t, or @ref number_float_t. + + @return pointer to the internally stored JSON value if the requested + pointer type @a PointerType fits to the JSON value; `nullptr` otherwise + + @complexity Constant. + + @liveexample{The example below shows how pointers to internal values of a + JSON value can be requested. Note that no type conversions are made and a + `nullptr` is returned if the value and the requested pointer type does not + match.,get__PointerType} + + @sa @ref get_ptr() for explicit pointer-member access + + @since version 1.0.0 + */ + template::value, int>::type = 0> + auto get() noexcept -> decltype(std::declval().template get_ptr()) + { + // delegate the call to get_ptr + return get_ptr(); + } + + /*! 
+ @brief get a pointer value (explicit) + @copydoc get() + */ + template::value, int>::type = 0> + constexpr auto get() const noexcept -> decltype(std::declval().template get_ptr()) + { + // delegate the call to get_ptr + return get_ptr(); + } + + /*! + @brief get a reference value (implicit) + + Implicit reference access to the internally stored JSON value. No copies + are made. + + @warning Writing data to the referee of the result yields an undefined + state. + + @tparam ReferenceType reference type; must be a reference to @ref array_t, + @ref object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, or + @ref number_float_t. Enforced by static assertion. + + @return reference to the internally stored JSON value if the requested + reference type @a ReferenceType fits to the JSON value; throws + type_error.303 otherwise + + @throw type_error.303 in case passed type @a ReferenceType is incompatible + with the stored JSON value; see example below + + @complexity Constant. + + @liveexample{The example shows several calls to `get_ref()`.,get_ref} + + @since version 1.1.0 + */ + template::value, int>::type = 0> + ReferenceType get_ref() + { + // delegate call to get_ref_impl + return get_ref_impl(*this); + } + + /*! + @brief get a reference value (implicit) + @copydoc get_ref() + */ + template < typename ReferenceType, typename std::enable_if < + std::is_reference::value&& + std::is_const::type>::value, int >::type = 0 > + ReferenceType get_ref() const + { + // delegate call to get_ref_impl + return get_ref_impl(*this); + } + + /*! + @brief get a value (implicit) + + Implicit type conversion between the JSON value and a compatible value. + The call is realized by calling @ref get() const. + + @tparam ValueType non-pointer type compatible to the JSON value, for + instance `int` for JSON integer numbers, `bool` for JSON booleans, or + `std::vector` types for JSON arrays. 
The character type of @ref string_t + as well as an initializer list of this type is excluded to avoid + ambiguities as these types implicitly convert to `std::string`. + + @return copy of the JSON value, converted to type @a ValueType + + @throw type_error.302 in case passed type @a ValueType is incompatible + to the JSON value type (e.g., the JSON value is of type boolean, but a + string is requested); see example below + + @complexity Linear in the size of the JSON value. + + @liveexample{The example below shows several conversions from JSON values + to other types. There a few things to note: (1) Floating-point numbers can + be converted to integers\, (2) A JSON array can be converted to a standard + `std::vector`\, (3) A JSON object can be converted to C++ + associative containers such as `std::unordered_map`.,operator__ValueType} + + @since version 1.0.0 + */ + template < typename ValueType, typename std::enable_if < + !std::is_pointer::value&& + !std::is_same>::value&& + !std::is_same::value&& + !detail::is_basic_json::value + && !std::is_same>::value +#if defined(JSON_HAS_CPP_17) && (defined(__GNUC__) || (defined(_MSC_VER) && _MSC_VER >= 1910 && _MSC_VER <= 1914)) + && !std::is_same::value +#endif + && detail::is_detected::value + , int >::type = 0 > + JSON_EXPLICIT operator ValueType() const + { + // delegate the call to get<>() const + return get(); + } + + /*! 
+ @return reference to the binary value + + @throw type_error.302 if the value is not binary + + @sa @ref is_binary() to check if the value is binary + + @since version 3.8.0 + */ + binary_t& get_binary() + { + if (!is_binary()) + { + JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name()), *this)); + } + + return *get_ptr(); + } + + /// @copydoc get_binary() + const binary_t& get_binary() const + { + if (!is_binary()) + { + JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name()), *this)); + } + + return *get_ptr(); + } + + /// @} + + + //////////////////// + // element access // + //////////////////// + + /// @name element access + /// Access to the JSON value. + /// @{ + + /*! + @brief access specified array element with bounds checking + + Returns a reference to the element at specified location @a idx, with + bounds checking. + + @param[in] idx index of the element to access + + @return reference to the element at index @a idx + + @throw type_error.304 if the JSON value is not an array; in this case, + calling `at` with an index makes no sense. See example below. + @throw out_of_range.401 if the index @a idx is out of range of the array; + that is, `idx >= size()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 1.0.0 + + @liveexample{The example below shows how array elements can be read and + written using `at()`. 
It also demonstrates the different exceptions that + can be thrown.,at__size_type} + */ + reference at(size_type idx) + { + // at only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + JSON_TRY + { + return set_parent(m_value.array->at(idx)); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief access specified array element with bounds checking + + Returns a const reference to the element at specified location @a idx, + with bounds checking. + + @param[in] idx index of the element to access + + @return const reference to the element at index @a idx + + @throw type_error.304 if the JSON value is not an array; in this case, + calling `at` with an index makes no sense. See example below. + @throw out_of_range.401 if the index @a idx is out of range of the array; + that is, `idx >= size()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 1.0.0 + + @liveexample{The example below shows how array elements can be read using + `at()`. It also demonstrates the different exceptions that can be thrown., + at__size_type_const} + */ + const_reference at(size_type idx) const + { + // at only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + JSON_TRY + { + return m_value.array->at(idx); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); + } + } + + /*! 
+ @brief access specified object element with bounds checking + + Returns a reference to the element at with specified key @a key, with + bounds checking. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.304 if the JSON value is not an object; in this case, + calling `at` with a key makes no sense. See example below. + @throw out_of_range.403 if the key @a key is is not stored in the object; + that is, `find(key) == end()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Logarithmic in the size of the container. + + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + + @liveexample{The example below shows how object elements can be read and + written using `at()`. It also demonstrates the different exceptions that + can be thrown.,at__object_t_key_type} + */ + reference at(const typename object_t::key_type& key) + { + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + JSON_TRY + { + return set_parent(m_value.object->at(key)); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(403, "key '" + key + "' not found", *this)); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief access specified object element with bounds checking + + Returns a const reference to the element at with specified key @a key, + with bounds checking. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @throw type_error.304 if the JSON value is not an object; in this case, + calling `at` with a key makes no sense. See example below. 
+ @throw out_of_range.403 if the key @a key is is not stored in the object; + that is, `find(key) == end()`. See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Logarithmic in the size of the container. + + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + + @liveexample{The example below shows how object elements can be read using + `at()`. It also demonstrates the different exceptions that can be thrown., + at__object_t_key_type_const} + */ + const_reference at(const typename object_t::key_type& key) const + { + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + JSON_TRY + { + return m_value.object->at(key); + } + JSON_CATCH (std::out_of_range&) + { + // create better exception explanation + JSON_THROW(out_of_range::create(403, "key '" + key + "' not found", *this)); + } + } + else + { + JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief access specified array element + + Returns a reference to the element at specified location @a idx. + + @note If @a idx is beyond the range of the array (i.e., `idx >= size()`), + then the array is silently filled up with `null` values to make `idx` a + valid reference to the last stored element. + + @param[in] idx index of the element to access + + @return reference to the element at index @a idx + + @throw type_error.305 if the JSON value is not an array or null; in that + cases, using the [] operator with an index makes no sense. + + @complexity Constant if @a idx is in the range of the array. Otherwise + linear in `idx - size()`. + + @liveexample{The example below shows how array elements can be read and + written using `[]` operator. 
Note the addition of `null` + values.,operatorarray__size_type} + + @since version 1.0.0 + */ + reference operator[](size_type idx) + { + // implicitly convert null value to an empty array + if (is_null()) + { + m_type = value_t::array; + m_value.array = create(); + assert_invariant(); + } + + // operator[] only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + // fill up array with null values if given idx is outside range + if (idx >= m_value.array->size()) + { +#if JSON_DIAGNOSTICS + // remember array size before resizing + const auto previous_size = m_value.array->size(); +#endif + m_value.array->resize(idx + 1); + +#if JSON_DIAGNOSTICS + // set parent for values added above + set_parents(begin() + static_cast(previous_size), static_cast(idx + 1 - previous_size)); +#endif + } + + return m_value.array->operator[](idx); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name()), *this)); + } + + /*! + @brief access specified array element + + Returns a const reference to the element at specified location @a idx. + + @param[in] idx index of the element to access + + @return const reference to the element at index @a idx + + @throw type_error.305 if the JSON value is not an array; in that case, + using the [] operator with an index makes no sense. + + @complexity Constant. + + @liveexample{The example below shows how array elements can be read using + the `[]` operator.,operatorarray__size_type_const} + + @since version 1.0.0 + */ + const_reference operator[](size_type idx) const + { + // const operator[] only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + return m_value.array->operator[](idx); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name()), *this)); + } + + /*! + @brief access specified object element + + Returns a reference to the element at with specified key @a key. 
+ + @note If @a key is not found in the object, then it is silently added to + the object and filled with a `null` value to make `key` a valid reference. + In case the value was `null` before, it is converted to an object. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.305 if the JSON value is not an object or null; in that + cases, using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be read and + written using the `[]` operator.,operatorarray__key_type} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + */ + reference operator[](const typename object_t::key_type& key) + { + // implicitly convert null value to an empty object + if (is_null()) + { + m_type = value_t::object; + m_value.object = create(); + assert_invariant(); + } + + // operator[] only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + return set_parent(m_value.object->operator[](key)); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); + } + + /*! + @brief read-only access specified object element + + Returns a const reference to the element at with specified key @a key. No + bounds checking is performed. + + @warning If the element with key @a key does not exist, the behavior is + undefined. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @pre The element with key @a key must exist. **This precondition is + enforced with an assertion.** + + @throw type_error.305 if the JSON value is not an object; in that case, + using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. 
+ + @liveexample{The example below shows how object elements can be read using + the `[]` operator.,operatorarray__key_type_const} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.0.0 + */ + const_reference operator[](const typename object_t::key_type& key) const + { + // const operator[] only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + JSON_ASSERT(m_value.object->find(key) != m_value.object->end()); + return m_value.object->find(key)->second; + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); + } + + /*! + @brief access specified object element + + Returns a reference to the element at with specified key @a key. + + @note If @a key is not found in the object, then it is silently added to + the object and filled with a `null` value to make `key` a valid reference. + In case the value was `null` before, it is converted to an object. + + @param[in] key key of the element to access + + @return reference to the element at key @a key + + @throw type_error.305 if the JSON value is not an object or null; in that + cases, using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. 
+ + @liveexample{The example below shows how object elements can be read and + written using the `[]` operator.,operatorarray__key_type} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.1.0 + */ + template + JSON_HEDLEY_NON_NULL(2) + reference operator[](T* key) + { + // implicitly convert null to object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + return set_parent(m_value.object->operator[](key)); + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); + } + + /*! + @brief read-only access specified object element + + Returns a const reference to the element at with specified key @a key. No + bounds checking is performed. + + @warning If the element with key @a key does not exist, the behavior is + undefined. + + @param[in] key key of the element to access + + @return const reference to the element at key @a key + + @pre The element with key @a key must exist. **This precondition is + enforced with an assertion.** + + @throw type_error.305 if the JSON value is not an object; in that case, + using the [] operator with a key makes no sense. + + @complexity Logarithmic in the size of the container. 
+ + @liveexample{The example below shows how object elements can be read using + the `[]` operator.,operatorarray__key_type_const} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref value() for access by value with a default value + + @since version 1.1.0 + */ + template + JSON_HEDLEY_NON_NULL(2) + const_reference operator[](T* key) const + { + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + JSON_ASSERT(m_value.object->find(key) != m_value.object->end()); + return m_value.object->find(key)->second; + } + + JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); + } + + /*! + @brief access specified object element with default value + + Returns either a copy of an object's element at the specified key @a key + or a given default value if no element with key @a key exists. + + The function is basically equivalent to executing + @code {.cpp} + try { + return at(key); + } catch(out_of_range) { + return default_value; + } + @endcode + + @note Unlike @ref at(const typename object_t::key_type&), this function + does not throw if the given key @a key was not found. + + @note Unlike @ref operator[](const typename object_t::key_type& key), this + function does not implicitly add an element to the position defined by @a + key. This function is furthermore also applicable to const objects. + + @param[in] key key of the element to access + @param[in] default_value the value to return if @a key is not found + + @tparam ValueType type compatible to JSON values, for instance `int` for + JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for + JSON arrays. Note the type of the expected value at @a key and the default + value @a default_value must be compatible. 
+ + @return copy of the element at key @a key or @a default_value if @a key + is not found + + @throw type_error.302 if @a default_value does not match the type of the + value at @a key + @throw type_error.306 if the JSON value is not an object; in that case, + using `value()` with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be queried + with a default value.,basic_json__value} + + @sa @ref at(const typename object_t::key_type&) for access by reference + with range checking + @sa @ref operator[](const typename object_t::key_type&) for unchecked + access by reference + + @since version 1.0.0 + */ + // using std::is_convertible in a std::enable_if will fail when using explicit conversions + template < class ValueType, typename std::enable_if < + detail::is_getable::value + && !std::is_same::value, int >::type = 0 > + ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const + { + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + // if key is found, return value and given default value otherwise + const auto it = find(key); + if (it != end()) + { + return it->template get(); + } + + return default_value; + } + + JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()), *this)); + } + + /*! + @brief overload for a default value of type const char* + @copydoc basic_json::value(const typename object_t::key_type&, const ValueType&) const + */ + string_t value(const typename object_t::key_type& key, const char* default_value) const + { + return value(key, string_t(default_value)); + } + + /*! + @brief access specified object element via JSON Pointer with default value + + Returns either a copy of an object's element at the specified key @a key + or a given default value if no element with key @a key exists. 
+ + The function is basically equivalent to executing + @code {.cpp} + try { + return at(ptr); + } catch(out_of_range) { + return default_value; + } + @endcode + + @note Unlike @ref at(const json_pointer&), this function does not throw + if the given key @a key was not found. + + @param[in] ptr a JSON pointer to the element to access + @param[in] default_value the value to return if @a ptr found no value + + @tparam ValueType type compatible to JSON values, for instance `int` for + JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for + JSON arrays. Note the type of the expected value at @a key and the default + value @a default_value must be compatible. + + @return copy of the element at key @a key or @a default_value if @a key + is not found + + @throw type_error.302 if @a default_value does not match the type of the + value at @a ptr + @throw type_error.306 if the JSON value is not an object; in that case, + using `value()` with a key makes no sense. + + @complexity Logarithmic in the size of the container. + + @liveexample{The example below shows how object elements can be queried + with a default value.,basic_json__value_ptr} + + @sa @ref operator[](const json_pointer&) for unchecked access by reference + + @since version 2.0.2 + */ + template::value, int>::type = 0> + ValueType value(const json_pointer& ptr, const ValueType& default_value) const + { + // at only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + // if pointer resolves a value, return it or use default value + JSON_TRY + { + return ptr.get_checked(this).template get(); + } + JSON_INTERNAL_CATCH (out_of_range&) + { + return default_value; + } + } + + JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()), *this)); + } + + /*! 
+ @brief overload for a default value of type const char* + @copydoc basic_json::value(const json_pointer&, ValueType) const + */ + JSON_HEDLEY_NON_NULL(3) + string_t value(const json_pointer& ptr, const char* default_value) const + { + return value(ptr, string_t(default_value)); + } + + /*! + @brief access the first element + + Returns a reference to the first element in the container. For a JSON + container `c`, the expression `c.front()` is equivalent to `*c.begin()`. + + @return In case of a structured type (array or object), a reference to the + first element is returned. In case of number, string, boolean, or binary + values, a reference to the value is returned. + + @complexity Constant. + + @pre The JSON value must not be `null` (would throw `std::out_of_range`) + or an empty array or object (undefined behavior, **guarded by + assertions**). + @post The JSON value remains unchanged. + + @throw invalid_iterator.214 when called on `null` value + + @liveexample{The following code shows an example for `front()`.,front} + + @sa @ref back() -- access the last element + + @since version 1.0.0 + */ + reference front() + { + return *begin(); + } + + /*! + @copydoc basic_json::front() + */ + const_reference front() const + { + return *cbegin(); + } + + /*! + @brief access the last element + + Returns a reference to the last element in the container. For a JSON + container `c`, the expression `c.back()` is equivalent to + @code {.cpp} + auto tmp = c.end(); + --tmp; + return *tmp; + @endcode + + @return In case of a structured type (array or object), a reference to the + last element is returned. In case of number, string, boolean, or binary + values, a reference to the value is returned. + + @complexity Constant. + + @pre The JSON value must not be `null` (would throw `std::out_of_range`) + or an empty array or object (undefined behavior, **guarded by + assertions**). + @post The JSON value remains unchanged. 
+ + @throw invalid_iterator.214 when called on a `null` value. See example + below. + + @liveexample{The following code shows an example for `back()`.,back} + + @sa @ref front() -- access the first element + + @since version 1.0.0 + */ + reference back() + { + auto tmp = end(); + --tmp; + return *tmp; + } + + /*! + @copydoc basic_json::back() + */ + const_reference back() const + { + auto tmp = cend(); + --tmp; + return *tmp; + } + + /*! + @brief remove element given an iterator + + Removes the element specified by iterator @a pos. The iterator @a pos must + be valid and dereferenceable. Thus the `end()` iterator (which is valid, + but is not dereferenceable) cannot be used as a value for @a pos. + + If called on a primitive type other than `null`, the resulting JSON value + will be `null`. + + @param[in] pos iterator to the element to remove + @return Iterator following the last removed element. If the iterator @a + pos refers to the last element, the `end()` iterator is returned. + + @tparam IteratorType an @ref iterator or @ref const_iterator + + @post Invalidates iterators and references at or after the point of the + erase, including the `end()` iterator. 
+ + @throw type_error.307 if called on a `null` value; example: `"cannot use + erase() with null"` + @throw invalid_iterator.202 if called on an iterator which does not belong + to the current JSON value; example: `"iterator does not fit current + value"` + @throw invalid_iterator.205 if called on a primitive type with invalid + iterator (i.e., any iterator which is not `begin()`); example: `"iterator + out of range"` + + @complexity The complexity depends on the type: + - objects: amortized constant + - arrays: linear in distance between @a pos and the end of the container + - strings and binary: linear in the length of the member + - other types: constant + + @liveexample{The example shows the result of `erase()` for different JSON + types.,erase__IteratorType} + + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + template < class IteratorType, typename std::enable_if < + std::is_same::value || + std::is_same::value, int >::type + = 0 > + IteratorType erase(IteratorType pos) + { + // make sure iterator fits the current value + if (JSON_HEDLEY_UNLIKELY(this != pos.m_object)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); + } + + IteratorType result = end(); + + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + case value_t::binary: + { + if (JSON_HEDLEY_UNLIKELY(!pos.m_it.primitive_iterator.is_begin())) + { + JSON_THROW(invalid_iterator::create(205, "iterator out of range", *this)); + } + + if (is_string()) + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, m_value.string); + std::allocator_traits::deallocate(alloc, 
m_value.string, 1); + m_value.string = nullptr; + } + else if (is_binary()) + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, m_value.binary); + std::allocator_traits::deallocate(alloc, m_value.binary, 1); + m_value.binary = nullptr; + } + + m_type = value_t::null; + assert_invariant(); + break; + } + + case value_t::object: + { + result.m_it.object_iterator = m_value.object->erase(pos.m_it.object_iterator); + break; + } + + case value_t::array: + { + result.m_it.array_iterator = m_value.array->erase(pos.m_it.array_iterator); + break; + } + + default: + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); + } + + return result; + } + + /*! + @brief remove elements given an iterator range + + Removes the element specified by the range `[first; last)`. The iterator + @a first does not need to be dereferenceable if `first == last`: erasing + an empty range is a no-op. + + If called on a primitive type other than `null`, the resulting JSON value + will be `null`. + + @param[in] first iterator to the beginning of the range to remove + @param[in] last iterator past the end of the range to remove + @return Iterator following the last removed element. If the iterator @a + second refers to the last element, the `end()` iterator is returned. + + @tparam IteratorType an @ref iterator or @ref const_iterator + + @post Invalidates iterators and references at or after the point of the + erase, including the `end()` iterator. 
+ + @throw type_error.307 if called on a `null` value; example: `"cannot use + erase() with null"` + @throw invalid_iterator.203 if called on iterators which does not belong + to the current JSON value; example: `"iterators do not fit current value"` + @throw invalid_iterator.204 if called on a primitive type with invalid + iterators (i.e., if `first != begin()` and `last != end()`); example: + `"iterators out of range"` + + @complexity The complexity depends on the type: + - objects: `log(size()) + std::distance(first, last)` + - arrays: linear in the distance between @a first and @a last, plus linear + in the distance between @a last and end of the container + - strings and binary: linear in the length of the member + - other types: constant + + @liveexample{The example shows the result of `erase()` for different JSON + types.,erase__IteratorType_IteratorType} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + template < class IteratorType, typename std::enable_if < + std::is_same::value || + std::is_same::value, int >::type + = 0 > + IteratorType erase(IteratorType first, IteratorType last) + { + // make sure iterator fits the current value + if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object)) + { + JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value", *this)); + } + + IteratorType result = end(); + + switch (m_type) + { + case value_t::boolean: + case value_t::number_float: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::string: + case value_t::binary: + { + if (JSON_HEDLEY_LIKELY(!first.m_it.primitive_iterator.is_begin() + || !last.m_it.primitive_iterator.is_end())) + { + JSON_THROW(invalid_iterator::create(204, "iterators out 
of range", *this)); + } + + if (is_string()) + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, m_value.string); + std::allocator_traits::deallocate(alloc, m_value.string, 1); + m_value.string = nullptr; + } + else if (is_binary()) + { + AllocatorType alloc; + std::allocator_traits::destroy(alloc, m_value.binary); + std::allocator_traits::deallocate(alloc, m_value.binary, 1); + m_value.binary = nullptr; + } + + m_type = value_t::null; + assert_invariant(); + break; + } + + case value_t::object: + { + result.m_it.object_iterator = m_value.object->erase(first.m_it.object_iterator, + last.m_it.object_iterator); + break; + } + + case value_t::array: + { + result.m_it.array_iterator = m_value.array->erase(first.m_it.array_iterator, + last.m_it.array_iterator); + break; + } + + default: + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); + } + + return result; + } + + /*! + @brief remove element from a JSON object given a key + + Removes elements from a JSON object with the key value @a key. + + @param[in] key value of the elements to remove + + @return Number of elements removed. If @a ObjectType is the default + `std::map` type, the return value will always be `0` (@a key was not + found) or `1` (@a key was found). + + @post References and iterators to the erased elements are invalidated. + Other references and iterators are not affected. 
+ + @throw type_error.307 when called on a type other than JSON object; + example: `"cannot use erase() with null"` + + @complexity `log(size()) + count(key)` + + @liveexample{The example shows the effect of `erase()`.,erase__key_type} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const size_type) -- removes the element from an array at + the given index + + @since version 1.0.0 + */ + size_type erase(const typename object_t::key_type& key) + { + // this erase only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + return m_value.object->erase(key); + } + + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); + } + + /*! + @brief remove element from a JSON array given an index + + Removes element from a JSON array at the index @a idx. + + @param[in] idx index of the element to remove + + @throw type_error.307 when called on a type other than JSON object; + example: `"cannot use erase() with null"` + @throw out_of_range.401 when `idx >= size()`; example: `"array index 17 + is out of range"` + + @complexity Linear in distance between @a idx and the end of the container. 
+ + @liveexample{The example shows the effect of `erase()`.,erase__size_type} + + @sa @ref erase(IteratorType) -- removes the element at a given position + @sa @ref erase(IteratorType, IteratorType) -- removes the elements in + the given range + @sa @ref erase(const typename object_t::key_type&) -- removes the element + from an object at the given key + + @since version 1.0.0 + */ + void erase(const size_type idx) + { + // this erase only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + if (JSON_HEDLEY_UNLIKELY(idx >= size())) + { + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); + } + + m_value.array->erase(m_value.array->begin() + static_cast(idx)); + } + else + { + JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); + } + } + + /// @} + + + //////////// + // lookup // + //////////// + + /// @name lookup + /// @{ + + /*! + @brief find an element in a JSON object + + Finds an element in a JSON object with key equivalent to @a key. If the + element is not found or the JSON value is not an object, end() is + returned. + + @note This method always returns @ref end() when executed on a JSON type + that is not an object. + + @param[in] key key value of the element to search for. + + @return Iterator to an element with key equivalent to @a key. If no such + element is found or the JSON value is not an object, past-the-end (see + @ref end()) iterator is returned. + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The example shows how `find()` is used.,find__key_type} + + @sa @ref contains(KeyT&&) const -- checks whether a key exists + + @since version 1.0.0 + */ + template + iterator find(KeyT&& key) + { + auto result = end(); + + if (is_object()) + { + result.m_it.object_iterator = m_value.object->find(std::forward(key)); + } + + return result; + } + + /*! 
+ @brief find an element in a JSON object + @copydoc find(KeyT&&) + */ + template + const_iterator find(KeyT&& key) const + { + auto result = cend(); + + if (is_object()) + { + result.m_it.object_iterator = m_value.object->find(std::forward(key)); + } + + return result; + } + + /*! + @brief returns the number of occurrences of a key in a JSON object + + Returns the number of elements with key @a key. If ObjectType is the + default `std::map` type, the return value will always be `0` (@a key was + not found) or `1` (@a key was found). + + @note This method always returns `0` when executed on a JSON type that is + not an object. + + @param[in] key key value of the element to count + + @return Number of elements with key @a key. If the JSON value is not an + object, the return value will be `0`. + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The example shows how `count()` is used.,count} + + @since version 1.0.0 + */ + template + size_type count(KeyT&& key) const + { + // return 0 for all nonobject types + return is_object() ? m_value.object->count(std::forward(key)) : 0; + } + + /*! + @brief check the existence of an element in a JSON object + + Check whether an element exists in a JSON object with key equivalent to + @a key. If the element is not found or the JSON value is not an object, + false is returned. + + @note This method always returns false when executed on a JSON type + that is not an object. + + @param[in] key key value to check its existence. + + @return true if an element with specified @a key exists. If no such + element with such key is found or the JSON value is not an object, + false is returned. + + @complexity Logarithmic in the size of the JSON object. 
+ + @liveexample{The following code shows an example for `contains()`.,contains} + + @sa @ref find(KeyT&&) -- returns an iterator to an object element + @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer + + @since version 3.6.0 + */ + template < typename KeyT, typename std::enable_if < + !std::is_same::type, json_pointer>::value, int >::type = 0 > + bool contains(KeyT && key) const + { + return is_object() && m_value.object->find(std::forward(key)) != m_value.object->end(); + } + + /*! + @brief check the existence of an element in a JSON object given a JSON pointer + + Check whether the given JSON pointer @a ptr can be resolved in the current + JSON value. + + @note This method can be executed on any JSON value type. + + @param[in] ptr JSON pointer to check its existence. + + @return true if the JSON pointer can be resolved to a stored value, false + otherwise. + + @post If `j.contains(ptr)` returns true, it is safe to call `j[ptr]`. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + + @complexity Logarithmic in the size of the JSON object. + + @liveexample{The following code shows an example for `contains()`.,contains_json_pointer} + + @sa @ref contains(KeyT &&) const -- checks the existence of a key + + @since version 3.7.0 + */ + bool contains(const json_pointer& ptr) const + { + return ptr.contains(this); + } + + /// @} + + + /////////////// + // iterators // + /////////////// + + /// @name iterators + /// @{ + + /*! + @brief returns an iterator to the first element + + Returns an iterator to the first element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return iterator to the first element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. 
+ + @liveexample{The following code shows an example for `begin()`.,begin} + + @sa @ref cbegin() -- returns a const iterator to the beginning + @sa @ref end() -- returns an iterator to the end + @sa @ref cend() -- returns a const iterator to the end + + @since version 1.0.0 + */ + iterator begin() noexcept + { + iterator result(this); + result.set_begin(); + return result; + } + + /*! + @copydoc basic_json::cbegin() + */ + const_iterator begin() const noexcept + { + return cbegin(); + } + + /*! + @brief returns a const iterator to the first element + + Returns a const iterator to the first element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return const iterator to the first element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast(*this).begin()`. + + @liveexample{The following code shows an example for `cbegin()`.,cbegin} + + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref end() -- returns an iterator to the end + @sa @ref cend() -- returns a const iterator to the end + + @since version 1.0.0 + */ + const_iterator cbegin() const noexcept + { + const_iterator result(this); + result.set_begin(); + return result; + } + + /*! + @brief returns an iterator to one past the last element + + Returns an iterator to one past the last element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return iterator one past the last element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. 
+ + @liveexample{The following code shows an example for `end()`.,end} + + @sa @ref cend() -- returns a const iterator to the end + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref cbegin() -- returns a const iterator to the beginning + + @since version 1.0.0 + */ + iterator end() noexcept + { + iterator result(this); + result.set_end(); + return result; + } + + /*! + @copydoc basic_json::cend() + */ + const_iterator end() const noexcept + { + return cend(); + } + + /*! + @brief returns a const iterator to one past the last element + + Returns a const iterator to one past the last element. + + @image html range-begin-end.svg "Illustration from cppreference.com" + + @return const iterator one past the last element + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast(*this).end()`. + + @liveexample{The following code shows an example for `cend()`.,cend} + + @sa @ref end() -- returns an iterator to the end + @sa @ref begin() -- returns an iterator to the beginning + @sa @ref cbegin() -- returns a const iterator to the beginning + + @since version 1.0.0 + */ + const_iterator cend() const noexcept + { + const_iterator result(this); + result.set_end(); + return result; + } + + /*! + @brief returns an iterator to the reverse-beginning + + Returns an iterator to the reverse-beginning; that is, the last element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `reverse_iterator(end())`. 
+ + @liveexample{The following code shows an example for `rbegin()`.,rbegin} + + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref crend() -- returns a const reverse iterator to the end + + @since version 1.0.0 + */ + reverse_iterator rbegin() noexcept + { + return reverse_iterator(end()); + } + + /*! + @copydoc basic_json::crbegin() + */ + const_reverse_iterator rbegin() const noexcept + { + return crbegin(); + } + + /*! + @brief returns an iterator to the reverse-end + + Returns an iterator to the reverse-end; that is, one before the first + element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `reverse_iterator(begin())`. + + @liveexample{The following code shows an example for `rend()`.,rend} + + @sa @ref crend() -- returns a const reverse iterator to the end + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + + @since version 1.0.0 + */ + reverse_iterator rend() noexcept + { + return reverse_iterator(begin()); + } + + /*! + @copydoc basic_json::crend() + */ + const_reverse_iterator rend() const noexcept + { + return crend(); + } + + /*! + @brief returns a const reverse iterator to the last element + + Returns a const iterator to the reverse-beginning; that is, the last + element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) + requirements: + - The complexity is constant. 
+ - Has the semantics of `const_cast(*this).rbegin()`. + + @liveexample{The following code shows an example for `crbegin()`.,crbegin} + + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref crend() -- returns a const reverse iterator to the end + + @since version 1.0.0 + */ + const_reverse_iterator crbegin() const noexcept + { + return const_reverse_iterator(cend()); + } + + /*! + @brief returns a const reverse iterator to one before the first + + Returns a const reverse iterator to the reverse-end; that is, one before + the first element. + + @image html range-rbegin-rend.svg "Illustration from cppreference.com" + + @complexity Constant. + + @requirement This function helps `basic_json` satisfying the + [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) + requirements: + - The complexity is constant. + - Has the semantics of `const_cast(*this).rend()`. + + @liveexample{The following code shows an example for `crend()`.,crend} + + @sa @ref rend() -- returns a reverse iterator to the end + @sa @ref rbegin() -- returns a reverse iterator to the beginning + @sa @ref crbegin() -- returns a const reverse iterator to the beginning + + @since version 1.0.0 + */ + const_reverse_iterator crend() const noexcept + { + return const_reverse_iterator(cbegin()); + } + + public: + /*! + @brief wrapper to access iterator member functions in range-based for + + This function allows to access @ref iterator::key() and @ref + iterator::value() during range-based for loops. In these loops, a + reference to the JSON values is returned, so there is no access to the + underlying iterator. 
+ + For loop without iterator_wrapper: + + @code{cpp} + for (auto it = j_object.begin(); it != j_object.end(); ++it) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + Range-based for loop without iterator proxy: + + @code{cpp} + for (auto it : j_object) + { + // "it" is of type json::reference and has no key() member + std::cout << "value: " << it << '\n'; + } + @endcode + + Range-based for loop with iterator proxy: + + @code{cpp} + for (auto it : json::iterator_wrapper(j_object)) + { + std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; + } + @endcode + + @note When iterating over an array, `key()` will return the index of the + element as string (see example). + + @param[in] ref reference to a JSON value + @return iteration proxy object wrapping @a ref with an interface to use in + range-based for loops + + @liveexample{The following code shows how the wrapper is used,iterator_wrapper} + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @note The name of this function is not yet final and may change in the + future. + + @deprecated This stream operator is deprecated and will be removed in + future 4.0.0 of the library. Please use @ref items() instead; + that is, replace `json::iterator_wrapper(j)` with `j.items()`. + */ + JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items()) + static iteration_proxy iterator_wrapper(reference ref) noexcept + { + return ref.items(); + } + + /*! + @copydoc iterator_wrapper(reference) + */ + JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items()) + static iteration_proxy iterator_wrapper(const_reference ref) noexcept + { + return ref.items(); + } + + /*! + @brief helper to access iterator member functions in range-based for + + This function allows to access @ref iterator::key() and @ref + iterator::value() during range-based for loops. 
In these loops, a
+    reference to the JSON values is returned, so there is no access to the
+    underlying iterator.
+
+    For loop without `items()` function:
+
+    @code{cpp}
+    for (auto it = j_object.begin(); it != j_object.end(); ++it)
+    {
+        std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+    }
+    @endcode
+
+    Range-based for loop without `items()` function:
+
+    @code{cpp}
+    for (auto it : j_object)
+    {
+        // "it" is of type json::reference and has no key() member
+        std::cout << "value: " << it << '\n';
+    }
+    @endcode
+
+    Range-based for loop with `items()` function:
+
+    @code{cpp}
+    for (auto& el : j_object.items())
+    {
+        std::cout << "key: " << el.key() << ", value:" << el.value() << '\n';
+    }
+    @endcode
+
+    The `items()` function also allows to use
+    [structured bindings](https://en.cppreference.com/w/cpp/language/structured_binding)
+    (C++17):
+
+    @code{cpp}
+    for (auto& [key, val] : j_object.items())
+    {
+        std::cout << "key: " << key << ", value:" << val << '\n';
+    }
+    @endcode
+
+    @note When iterating over an array, `key()` will return the index of the
+    element as string (see example). For primitive types (e.g., numbers),
+    `key()` returns an empty string.
+
+    @warning Using `items()` on temporary objects is dangerous. Make sure the
+    object's lifetime exceeds the iteration. See
+    <https://github.com/nlohmann/json/issues/2040> for more
+    information.
+
+    @return iteration proxy object wrapping @a ref with an interface to use in
+    range-based for loops
+
+    @liveexample{The following code shows how the function is used.,items}
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 3.1.0, structured bindings support since 3.5.0.
+    */
+    iteration_proxy<iterator> items() noexcept
+    {
+        return iteration_proxy<iterator>(*this);
+    }
+
+    /*!
+ @copydoc items() + */ + iteration_proxy items() const noexcept + { + return iteration_proxy(*this); + } + + /// @} + + + ////////////// + // capacity // + ////////////// + + /// @name capacity + /// @{ + + /*! + @brief checks whether the container is empty. + + Checks if a JSON value has no elements (i.e. whether its @ref size is `0`). + + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `true` + boolean | `false` + string | `false` + number | `false` + binary | `false` + object | result of function `object_t::empty()` + array | result of function `array_t::empty()` + + @liveexample{The following code uses `empty()` to check if a JSON + object contains any elements.,empty} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their `empty()` functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @note This function does not return whether a string stored as JSON value + is empty - it returns whether the JSON container itself is empty which is + false in the case of a string. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. + - Has the semantics of `begin() == end()`. + + @sa @ref size() -- returns the number of elements + + @since version 1.0.0 + */ + bool empty() const noexcept + { + switch (m_type) + { + case value_t::null: + { + // null values are empty + return true; + } + + case value_t::array: + { + // delegate call to array_t::empty() + return m_value.array->empty(); + } + + case value_t::object: + { + // delegate call to object_t::empty() + return m_value.object->empty(); + } + + default: + { + // all other types are nonempty + return false; + } + } + } + + /*! 
+ @brief returns the number of elements + + Returns the number of elements in a JSON value. + + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `0` + boolean | `1` + string | `1` + number | `1` + binary | `1` + object | result of function object_t::size() + array | result of function array_t::size() + + @liveexample{The following code calls `size()` on the different value + types.,size} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their size() functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @note This function does not return the length of a string stored as JSON + value - it returns the number of elements in the JSON value which is 1 in + the case of a string. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. + - Has the semantics of `std::distance(begin(), end())`. + + @sa @ref empty() -- checks whether the container is empty + @sa @ref max_size() -- returns the maximal number of elements + + @since version 1.0.0 + */ + size_type size() const noexcept + { + switch (m_type) + { + case value_t::null: + { + // null values are empty + return 0; + } + + case value_t::array: + { + // delegate call to array_t::size() + return m_value.array->size(); + } + + case value_t::object: + { + // delegate call to object_t::size() + return m_value.object->size(); + } + + default: + { + // all other types have size 1 + return 1; + } + } + } + + /*! + @brief returns the maximum possible number of elements + + Returns the maximum number of elements a JSON value is able to hold due to + system or library implementation limitations, i.e. `std::distance(begin(), + end())` for the JSON value. 
+ + @return The return value depends on the different types and is + defined as follows: + Value type | return value + ----------- | ------------- + null | `0` (same as `size()`) + boolean | `1` (same as `size()`) + string | `1` (same as `size()`) + number | `1` (same as `size()`) + binary | `1` (same as `size()`) + object | result of function `object_t::max_size()` + array | result of function `array_t::max_size()` + + @liveexample{The following code calls `max_size()` on the different value + types. Note the output is implementation specific.,max_size} + + @complexity Constant, as long as @ref array_t and @ref object_t satisfy + the Container concept; that is, their `max_size()` functions have constant + complexity. + + @iterators No changes. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @requirement This function helps `basic_json` satisfying the + [Container](https://en.cppreference.com/w/cpp/named_req/Container) + requirements: + - The complexity is constant. + - Has the semantics of returning `b.size()` where `b` is the largest + possible JSON value. + + @sa @ref size() -- returns the number of elements + + @since version 1.0.0 + */ + size_type max_size() const noexcept + { + switch (m_type) + { + case value_t::array: + { + // delegate call to array_t::max_size() + return m_value.array->max_size(); + } + + case value_t::object: + { + // delegate call to object_t::max_size() + return m_value.object->max_size(); + } + + default: + { + // all other types have max_size() == size() + return size(); + } + } + } + + /// @} + + + /////////////// + // modifiers // + /////////////// + + /// @name modifiers + /// @{ + + /*! 
+ @brief clears the contents + + Clears the content of a JSON value and resets it to the default value as + if @ref basic_json(value_t) would have been called with the current value + type from @ref type(): + + Value type | initial value + ----------- | ------------- + null | `null` + boolean | `false` + string | `""` + number | `0` + binary | An empty byte vector + object | `{}` + array | `[]` + + @post Has the same effect as calling + @code {.cpp} + *this = basic_json(type()); + @endcode + + @liveexample{The example below shows the effect of `clear()` to different + JSON types.,clear} + + @complexity Linear in the size of the JSON value. + + @iterators All iterators, pointers and references related to this container + are invalidated. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @sa @ref basic_json(value_t) -- constructor that creates an object with the + same value than calling `clear()` + + @since version 1.0.0 + */ + void clear() noexcept + { + switch (m_type) + { + case value_t::number_integer: + { + m_value.number_integer = 0; + break; + } + + case value_t::number_unsigned: + { + m_value.number_unsigned = 0; + break; + } + + case value_t::number_float: + { + m_value.number_float = 0.0; + break; + } + + case value_t::boolean: + { + m_value.boolean = false; + break; + } + + case value_t::string: + { + m_value.string->clear(); + break; + } + + case value_t::binary: + { + m_value.binary->clear(); + break; + } + + case value_t::array: + { + m_value.array->clear(); + break; + } + + case value_t::object: + { + m_value.object->clear(); + break; + } + + default: + break; + } + } + + /*! + @brief add an object to an array + + Appends the given element @a val to the end of the JSON value. If the + function is called on a JSON null value, an empty array is created before + appending @a val. 
+ + @param[in] val the value to add to the JSON array + + @throw type_error.308 when called on a type other than JSON array or + null; example: `"cannot use push_back() with number"` + + @complexity Amortized constant. + + @liveexample{The example shows how `push_back()` and `+=` can be used to + add elements to a JSON array. Note how the `null` value was silently + converted to a JSON array.,push_back} + + @since version 1.0.0 + */ + void push_back(basic_json&& val) + { + // push_back only works for null objects or arrays + if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array (move semantics) + m_value.array->push_back(std::move(val)); + set_parent(m_value.array->back()); + // if val is moved from, basic_json move constructor marks it null so we do not call the destructor + } + + /*! + @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + reference operator+=(basic_json&& val) + { + push_back(std::move(val)); + return *this; + } + + /*! + @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + void push_back(const basic_json& val) + { + // push_back only works for null objects or arrays + if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array + m_value.array->push_back(val); + set_parent(m_value.array->back()); + } + + /*! 
+ @brief add an object to an array + @copydoc push_back(basic_json&&) + */ + reference operator+=(const basic_json& val) + { + push_back(val); + return *this; + } + + /*! + @brief add an object to an object + + Inserts the given element @a val to the JSON object. If the function is + called on a JSON null value, an empty object is created before inserting + @a val. + + @param[in] val the value to add to the JSON object + + @throw type_error.308 when called on a type other than JSON object or + null; example: `"cannot use push_back() with number"` + + @complexity Logarithmic in the size of the container, O(log(`size()`)). + + @liveexample{The example shows how `push_back()` and `+=` can be used to + add elements to a JSON object. Note how the `null` value was silently + converted to a JSON object.,push_back__object_t__value} + + @since version 1.0.0 + */ + void push_back(const typename object_t::value_type& val) + { + // push_back only works for null objects or objects + if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object()))) + { + JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); + } + + // transform null object into an object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // add element to object + auto res = m_value.object->insert(val); + set_parent(res.first->second); + } + + /*! + @brief add an object to an object + @copydoc push_back(const typename object_t::value_type&) + */ + reference operator+=(const typename object_t::value_type& val) + { + push_back(val); + return *this; + } + + /*! + @brief add an object to an object + + This function allows to use `push_back` with an initializer list. In case + + 1. the current value is an object, + 2. the initializer list @a init contains only two elements, and + 3. 
the first element of @a init is a string, + + @a init is converted into an object element and added using + @ref push_back(const typename object_t::value_type&). Otherwise, @a init + is converted to a JSON value and added using @ref push_back(basic_json&&). + + @param[in] init an initializer list + + @complexity Linear in the size of the initializer list @a init. + + @note This function is required to resolve an ambiguous overload error, + because pairs like `{"key", "value"}` can be both interpreted as + `object_t::value_type` or `std::initializer_list`, see + https://github.com/nlohmann/json/issues/235 for more information. + + @liveexample{The example shows how initializer lists are treated as + objects when possible.,push_back__initializer_list} + */ + void push_back(initializer_list_t init) + { + if (is_object() && init.size() == 2 && (*init.begin())->is_string()) + { + basic_json&& key = init.begin()->moved_or_copied(); + push_back(typename object_t::value_type( + std::move(key.get_ref()), (init.begin() + 1)->moved_or_copied())); + } + else + { + push_back(basic_json(init)); + } + } + + /*! + @brief add an object to an object + @copydoc push_back(initializer_list_t) + */ + reference operator+=(initializer_list_t init) + { + push_back(init); + return *this; + } + + /*! + @brief add an object to an array + + Creates a JSON value from the passed parameters @a args to the end of the + JSON value. If the function is called on a JSON null value, an empty array + is created before appending the value created from @a args. + + @param[in] args arguments to forward to a constructor of @ref basic_json + @tparam Args compatible types to create a @ref basic_json object + + @return reference to the inserted element + + @throw type_error.311 when called on a type other than JSON array or + null; example: `"cannot use emplace_back() with number"` + + @complexity Amortized constant. 
+ + @liveexample{The example shows how `push_back()` can be used to add + elements to a JSON array. Note how the `null` value was silently converted + to a JSON array.,emplace_back} + + @since version 2.0.8, returns reference since 3.7.0 + */ + template + reference emplace_back(Args&& ... args) + { + // emplace_back only works for null objects or arrays + if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) + { + JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name()), *this)); + } + + // transform null object into an array + if (is_null()) + { + m_type = value_t::array; + m_value = value_t::array; + assert_invariant(); + } + + // add element to array (perfect forwarding) +#ifdef JSON_HAS_CPP_17 + return set_parent(m_value.array->emplace_back(std::forward(args)...)); +#else + m_value.array->emplace_back(std::forward(args)...); + return set_parent(m_value.array->back()); +#endif + } + + /*! + @brief add an object to an object if key does not exist + + Inserts a new element into a JSON object constructed in-place with the + given @a args if there is no element with the key in the container. If the + function is called on a JSON null value, an empty object is created before + appending the value created from @a args. + + @param[in] args arguments to forward to a constructor of @ref basic_json + @tparam Args compatible types to create a @ref basic_json object + + @return a pair consisting of an iterator to the inserted element, or the + already-existing element if no insertion happened, and a bool + denoting whether the insertion took place. + + @throw type_error.311 when called on a type other than JSON object or + null; example: `"cannot use emplace() with number"` + + @complexity Logarithmic in the size of the container, O(log(`size()`)). + + @liveexample{The example shows how `emplace()` can be used to add elements + to a JSON object. Note how the `null` value was silently converted to a + JSON object. 
Further note how no value is added if there was already one + value stored with the same key.,emplace} + + @since version 2.0.8 + */ + template + std::pair emplace(Args&& ... args) + { + // emplace only works for null objects or arrays + if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object()))) + { + JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name()), *this)); + } + + // transform null object into an object + if (is_null()) + { + m_type = value_t::object; + m_value = value_t::object; + assert_invariant(); + } + + // add element to array (perfect forwarding) + auto res = m_value.object->emplace(std::forward(args)...); + set_parent(res.first->second); + + // create result iterator and set iterator to the result of emplace + auto it = begin(); + it.m_it.object_iterator = res.first; + + // return pair of iterator and boolean + return {it, res.second}; + } + + /// Helper for insertion of an iterator + /// @note: This uses std::distance to support GCC 4.8, + /// see https://github.com/nlohmann/json/pull/1257 + template + iterator insert_iterator(const_iterator pos, Args&& ... args) + { + iterator result(this); + JSON_ASSERT(m_value.array != nullptr); + + auto insert_pos = std::distance(m_value.array->begin(), pos.m_it.array_iterator); + m_value.array->insert(pos.m_it.array_iterator, std::forward(args)...); + result.m_it.array_iterator = m_value.array->begin() + insert_pos; + + // This could have been written as: + // result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, cnt, val); + // but the return value of insert is missing in GCC 4.8, so it is written this way instead. + + return result; + } + + /*! + @brief inserts element + + Inserts element @a val before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] val element to insert + @return iterator pointing to the inserted @a val. 
+ + @throw type_error.309 if called on JSON values other than arrays; + example: `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @complexity Constant plus linear in the distance between @a pos and end of + the container. + + @liveexample{The example shows how `insert()` is used.,insert} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, const basic_json& val) + { + // insert only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + // check if iterator pos fits to this JSON value + if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); + } + + // insert to array and return iterator + return set_parents(insert_iterator(pos, val), static_cast(1)); + } + + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); + } + + /*! + @brief inserts element + @copydoc insert(const_iterator, const basic_json&) + */ + iterator insert(const_iterator pos, basic_json&& val) + { + return insert(pos, val); + } + + /*! + @brief inserts elements + + Inserts @a cnt copies of @a val before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] cnt number of copies of @a val to insert + @param[in] val element to insert + @return iterator pointing to the first element inserted, or @a pos if + `cnt==0` + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @complexity Linear in @a cnt plus linear in the distance between @a pos + and end of the container. 
+ + @liveexample{The example shows how `insert()` is used.,insert__count} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, size_type cnt, const basic_json& val) + { + // insert only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + // check if iterator pos fits to this JSON value + if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); + } + + // insert to array and return iterator + return set_parents(insert_iterator(pos, cnt, val), static_cast(cnt)); + } + + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); + } + + /*! + @brief inserts elements + + Inserts elements from range `[first, last)` before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + @throw invalid_iterator.211 if @a first or @a last are iterators into + container for which insert is called; example: `"passed iterators may not + belong to container"` + + @return iterator pointing to the first element inserted, or @a pos if + `first==last` + + @complexity Linear in `std::distance(first, last)` plus linear in the + distance between @a pos and end of the container. 
+ + @liveexample{The example shows how `insert()` is used.,insert__range} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, const_iterator first, const_iterator last) + { + // insert only works for arrays + if (JSON_HEDLEY_UNLIKELY(!is_array())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); + } + + // check if iterator pos fits to this JSON value + if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); + } + + // check if range iterators belong to the same JSON object + if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(210, "iterators do not fit", *this)); + } + + if (JSON_HEDLEY_UNLIKELY(first.m_object == this)) + { + JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container", *this)); + } + + // insert to array and return iterator + return set_parents(insert_iterator(pos, first.m_it.array_iterator, last.m_it.array_iterator), std::distance(first, last)); + } + + /*! + @brief inserts elements + + Inserts elements from initializer list @a ilist before iterator @a pos. + + @param[in] pos iterator before which the content will be inserted; may be + the end() iterator + @param[in] ilist initializer list to insert the values from + + @throw type_error.309 if called on JSON values other than arrays; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if @a pos is not an iterator of *this; + example: `"iterator does not fit current value"` + + @return iterator pointing to the first element inserted, or @a pos if + `ilist` is empty + + @complexity Linear in `ilist.size()` plus linear in the distance between + @a pos and end of the container. 
+ + @liveexample{The example shows how `insert()` is used.,insert__ilist} + + @since version 1.0.0 + */ + iterator insert(const_iterator pos, initializer_list_t ilist) + { + // insert only works for arrays + if (JSON_HEDLEY_UNLIKELY(!is_array())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); + } + + // check if iterator pos fits to this JSON value + if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) + { + JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); + } + + // insert to array and return iterator + return set_parents(insert_iterator(pos, ilist.begin(), ilist.end()), static_cast(ilist.size())); + } + + /*! + @brief inserts elements + + Inserts elements from range `[first, last)`. + + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.309 if called on JSON values other than objects; example: + `"cannot use insert() with string"` + @throw invalid_iterator.202 if iterator @a first or @a last does does not + point to an object; example: `"iterators first and last must point to + objects"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + + @complexity Logarithmic: `O(N*log(size() + N))`, where `N` is the number + of elements to insert. 
+ + @liveexample{The example shows how `insert()` is used.,insert__range_object} + + @since version 3.0.0 + */ + void insert(const_iterator first, const_iterator last) + { + // insert only works for objects + if (JSON_HEDLEY_UNLIKELY(!is_object())) + { + JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); + } + + // check if range iterators belong to the same JSON object + if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) + { + JSON_THROW(invalid_iterator::create(210, "iterators do not fit", *this)); + } + + // passed iterators must belong to objects + if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object())) + { + JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects", *this)); + } + + m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); + } + + /*! + @brief updates a JSON object from another object, overwriting existing keys + + Inserts all values from JSON object @a j and overwrites existing keys. + + @param[in] j JSON object to read values from + + @throw type_error.312 if called on JSON values other than objects; example: + `"cannot use update() with string"` + + @complexity O(N*log(size() + N)), where N is the number of elements to + insert. 
+ + @liveexample{The example shows how `update()` is used.,update} + + @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update + + @since version 3.0.0 + */ + void update(const_reference j) + { + // implicitly convert null value to an empty object + if (is_null()) + { + m_type = value_t::object; + m_value.object = create(); + assert_invariant(); + } + + if (JSON_HEDLEY_UNLIKELY(!is_object())) + { + JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name()), *this)); + } + if (JSON_HEDLEY_UNLIKELY(!j.is_object())) + { + JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name()), *this)); + } + + for (auto it = j.cbegin(); it != j.cend(); ++it) + { + m_value.object->operator[](it.key()) = it.value(); + } + } + + /*! + @brief updates a JSON object from another object, overwriting existing keys + + Inserts all values from from range `[first, last)` and overwrites existing + keys. + + @param[in] first begin of the range of elements to insert + @param[in] last end of the range of elements to insert + + @throw type_error.312 if called on JSON values other than objects; example: + `"cannot use update() with string"` + @throw invalid_iterator.202 if iterator @a first or @a last does does not + point to an object; example: `"iterators first and last must point to + objects"` + @throw invalid_iterator.210 if @a first and @a last do not belong to the + same JSON value; example: `"iterators do not fit"` + + @complexity O(N*log(size() + N)), where N is the number of elements to + insert. 
@liveexample{The example shows how `update()` is used.,update__range}
+ @brief exchanges the values + + Exchanges the contents of the JSON value from @a left with those of @a right. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. implemented as a friend function callable via ADL. + + @param[in,out] left JSON value to exchange the contents with + @param[in,out] right JSON value to exchange the contents with + + @complexity Constant. + + @liveexample{The example below shows how JSON values can be swapped with + `swap()`.,swap__reference} + + @since version 1.0.0 + */ + friend void swap(reference left, reference right) noexcept ( + std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_assignable::value&& + std::is_nothrow_move_constructible::value&& + std::is_nothrow_move_assignable::value + ) + { + left.swap(right); + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON array with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other array to exchange the contents with + + @throw type_error.310 when JSON value is not an array; example: `"cannot + use swap() with string"` + + @complexity Constant. + + @liveexample{The example below shows how arrays can be swapped with + `swap()`.,swap__array_t} + + @since version 1.0.0 + */ + void swap(array_t& other) + { + // swap only works for arrays + if (JSON_HEDLEY_LIKELY(is_array())) + { + std::swap(*(m_value.array), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON object with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. 
The past-the-end iterator is + invalidated. + + @param[in,out] other object to exchange the contents with + + @throw type_error.310 when JSON value is not an object; example: + `"cannot use swap() with string"` + + @complexity Constant. + + @liveexample{The example below shows how objects can be swapped with + `swap()`.,swap__object_t} + + @since version 1.0.0 + */ + void swap(object_t& other) + { + // swap only works for objects + if (JSON_HEDLEY_LIKELY(is_object())) + { + std::swap(*(m_value.object), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON string with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other string to exchange the contents with + + @throw type_error.310 when JSON value is not a string; example: `"cannot + use swap() with boolean"` + + @complexity Constant. + + @liveexample{The example below shows how strings can be swapped with + `swap()`.,swap__string_t} + + @since version 1.0.0 + */ + void swap(string_t& other) + { + // swap only works for strings + if (JSON_HEDLEY_LIKELY(is_string())) + { + std::swap(*(m_value.string), other); + } + else + { + JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); + } + } + + /*! + @brief exchanges the values + + Exchanges the contents of a JSON string with those of @a other. Does not + invoke any move, copy, or swap operations on individual elements. All + iterators and references remain valid. The past-the-end iterator is + invalidated. + + @param[in,out] other binary to exchange the contents with + + @throw type_error.310 when JSON value is not a string; example: `"cannot + use swap() with boolean"` + + @complexity Constant. 
@liveexample{The example below shows how binary values can be swapped with
To compare floating-point while respecting an epsilon, an alternative + [comparison function](https://github.com/mariokonrad/marnav/blob/master/include/marnav/math/floatingpoint.hpp#L34-#L39) + could be used, for instance + @code {.cpp} + template::value, T>::type> + inline bool is_same(T a, T b, T epsilon = std::numeric_limits::epsilon()) noexcept + { + return std::abs(a - b) <= epsilon; + } + @endcode + Or you can self-defined operator equal function like this: + @code {.cpp} + bool my_equal(const_reference lhs, const_reference rhs) { + const auto lhs_type lhs.type(); + const auto rhs_type rhs.type(); + if (lhs_type == rhs_type) { + switch(lhs_type) + // self_defined case + case value_t::number_float: + return std::abs(lhs - rhs) <= std::numeric_limits::epsilon(); + // other cases remain the same with the original + ... + } + ... + } + @endcode + + @note NaN values never compare equal to themselves or to other NaN values. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether the values @a lhs and @a rhs are equal + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @complexity Linear. 
+ + @liveexample{The example demonstrates comparing several JSON + types.,operator__equal} + + @since version 1.0.0 + */ + friend bool operator==(const_reference lhs, const_reference rhs) noexcept + { + const auto lhs_type = lhs.type(); + const auto rhs_type = rhs.type(); + + if (lhs_type == rhs_type) + { + switch (lhs_type) + { + case value_t::array: + return *lhs.m_value.array == *rhs.m_value.array; + + case value_t::object: + return *lhs.m_value.object == *rhs.m_value.object; + + case value_t::null: + return true; + + case value_t::string: + return *lhs.m_value.string == *rhs.m_value.string; + + case value_t::boolean: + return lhs.m_value.boolean == rhs.m_value.boolean; + + case value_t::number_integer: + return lhs.m_value.number_integer == rhs.m_value.number_integer; + + case value_t::number_unsigned: + return lhs.m_value.number_unsigned == rhs.m_value.number_unsigned; + + case value_t::number_float: + return lhs.m_value.number_float == rhs.m_value.number_float; + + case value_t::binary: + return *lhs.m_value.binary == *rhs.m_value.binary; + + default: + return false; + } + } + else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float) + { + return static_cast(lhs.m_value.number_integer) == rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer) + { + return lhs.m_value.number_float == static_cast(rhs.m_value.number_integer); + } + else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float) + { + return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_float == static_cast(rhs.m_value.number_unsigned); + } + else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer) + { + return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_integer; + } + else if (lhs_type == 
value_t::number_integer && rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_integer == static_cast(rhs.m_value.number_unsigned); + } + + return false; + } + + /*! + @brief comparison: equal + @copydoc operator==(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs == basic_json(rhs); + } + + /*! + @brief comparison: equal + @copydoc operator==(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) == rhs; + } + + /*! + @brief comparison: not equal + + Compares two JSON values for inequality by calculating `not (lhs == rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether the values @a lhs and @a rhs are not equal + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__notequal} + + @since version 1.0.0 + */ + friend bool operator!=(const_reference lhs, const_reference rhs) noexcept + { + return !(lhs == rhs); + } + + /*! + @brief comparison: not equal + @copydoc operator!=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs != basic_json(rhs); + } + + /*! + @brief comparison: not equal + @copydoc operator!=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) != rhs; + } + + /*! 
+ @brief comparison: less than + + Compares whether one JSON value @a lhs is less than another JSON value @a + rhs according to the following rules: + - If @a lhs and @a rhs have the same type, the values are compared using + the default `<` operator. + - Integer and floating-point numbers are automatically converted before + comparison + - In case @a lhs and @a rhs have different types, the values are ignored + and the order of the types is considered, see + @ref operator<(const value_t, const value_t). + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is less than @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__less} + + @since version 1.0.0 + */ + friend bool operator<(const_reference lhs, const_reference rhs) noexcept + { + const auto lhs_type = lhs.type(); + const auto rhs_type = rhs.type(); + + if (lhs_type == rhs_type) + { + switch (lhs_type) + { + case value_t::array: + // note parentheses are necessary, see + // https://github.com/nlohmann/json/issues/1530 + return (*lhs.m_value.array) < (*rhs.m_value.array); + + case value_t::object: + return (*lhs.m_value.object) < (*rhs.m_value.object); + + case value_t::null: + return false; + + case value_t::string: + return (*lhs.m_value.string) < (*rhs.m_value.string); + + case value_t::boolean: + return (lhs.m_value.boolean) < (rhs.m_value.boolean); + + case value_t::number_integer: + return (lhs.m_value.number_integer) < (rhs.m_value.number_integer); + + case value_t::number_unsigned: + return (lhs.m_value.number_unsigned) < (rhs.m_value.number_unsigned); + + case value_t::number_float: + return (lhs.m_value.number_float) < (rhs.m_value.number_float); + + case value_t::binary: + return (*lhs.m_value.binary) < (*rhs.m_value.binary); + + default: + return false; + } + } + else if (lhs_type == 
value_t::number_integer && rhs_type == value_t::number_float) + { + return static_cast(lhs.m_value.number_integer) < rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer) + { + return lhs.m_value.number_float < static_cast(rhs.m_value.number_integer); + } + else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float) + { + return static_cast(lhs.m_value.number_unsigned) < rhs.m_value.number_float; + } + else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_float < static_cast(rhs.m_value.number_unsigned); + } + else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned) + { + return lhs.m_value.number_integer < static_cast(rhs.m_value.number_unsigned); + } + else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer) + { + return static_cast(lhs.m_value.number_unsigned) < rhs.m_value.number_integer; + } + + // We only reach this line if we cannot compare values. In that case, + // we compare types. Note we have to call the operator explicitly, + // because MSVC has problems otherwise. + return operator<(lhs_type, rhs_type); + } + + /*! + @brief comparison: less than + @copydoc operator<(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs < basic_json(rhs); + } + + /*! + @brief comparison: less than + @copydoc operator<(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) < rhs; + } + + /*! + @brief comparison: less than or equal + + Compares whether one JSON value @a lhs is less than or equal to another + JSON value by calculating `not (rhs < lhs)`. 
+ + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is less than or equal to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__greater} + + @since version 1.0.0 + */ + friend bool operator<=(const_reference lhs, const_reference rhs) noexcept + { + return !(rhs < lhs); + } + + /*! + @brief comparison: less than or equal + @copydoc operator<=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs <= basic_json(rhs); + } + + /*! + @brief comparison: less than or equal + @copydoc operator<=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) <= rhs; + } + + /*! + @brief comparison: greater than + + Compares whether one JSON value @a lhs is greater than another + JSON value by calculating `not (lhs <= rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is greater than to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__lessequal} + + @since version 1.0.0 + */ + friend bool operator>(const_reference lhs, const_reference rhs) noexcept + { + return !(lhs <= rhs); + } + + /*! + @brief comparison: greater than + @copydoc operator>(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs > basic_json(rhs); + } + + /*! 
+ @brief comparison: greater than + @copydoc operator>(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) > rhs; + } + + /*! + @brief comparison: greater than or equal + + Compares whether one JSON value @a lhs is greater than or equal to another + JSON value by calculating `not (lhs < rhs)`. + + @param[in] lhs first JSON value to consider + @param[in] rhs second JSON value to consider + @return whether @a lhs is greater than or equal to @a rhs + + @complexity Linear. + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @liveexample{The example demonstrates comparing several JSON + types.,operator__greaterequal} + + @since version 1.0.0 + */ + friend bool operator>=(const_reference lhs, const_reference rhs) noexcept + { + return !(lhs < rhs); + } + + /*! + @brief comparison: greater than or equal + @copydoc operator>=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept + { + return lhs >= basic_json(rhs); + } + + /*! + @brief comparison: greater than or equal + @copydoc operator>=(const_reference, const_reference) + */ + template::value, int>::type = 0> + friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept + { + return basic_json(lhs) >= rhs; + } + + /// @} + + /////////////////// + // serialization // + /////////////////// + + /// @name serialization + /// @{ + + /*! + @brief serialize to stream + + Serialize the given JSON value @a j to the output stream @a o. The JSON + value will be serialized using the @ref dump member function. + + - The indentation of the output can be controlled with the member variable + `width` of the output stream @a o. 
For instance, using the manipulator + `std::setw(4)` on @a o sets the indentation level to `4` and the + serialization result is the same as calling `dump(4)`. + + - The indentation character can be controlled with the member variable + `fill` of the output stream @a o. For instance, the manipulator + `std::setfill('\\t')` sets indentation to use a tab character rather than + the default space character. + + @param[in,out] o stream to serialize to + @param[in] j JSON value to serialize + + @return the stream @a o + + @throw type_error.316 if a string stored inside the JSON value is not + UTF-8 encoded + + @complexity Linear. + + @liveexample{The example below shows the serialization with different + parameters to `width` to adjust the indentation level.,operator_serialize} + + @since version 1.0.0; indentation character added in version 3.0.0 + */ + friend std::ostream& operator<<(std::ostream& o, const basic_json& j) + { + // read width member and use it as indentation parameter if nonzero + const bool pretty_print = o.width() > 0; + const auto indentation = pretty_print ? o.width() : 0; + + // reset width to 0 for subsequent calls to this stream + o.width(0); + + // do the actual serialization + serializer s(detail::output_adapter(o), o.fill()); + s.dump(j, pretty_print, false, static_cast(indentation)); + return o; + } + + /*! + @brief serialize to stream + @deprecated This stream operator is deprecated and will be removed in + future 4.0.0 of the library. Please use + @ref operator<<(std::ostream&, const basic_json&) + instead; that is, replace calls like `j >> o;` with `o << j;`. + @since version 1.0.0; deprecated since version 3.0.0 + */ + JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator<<(std::ostream&, const basic_json&)) + friend std::ostream& operator>>(const basic_json& j, std::ostream& o) + { + return o << j; + } + + /// @} + + + ///////////////////// + // deserialization // + ///////////////////// + + /// @name deserialization + /// @{ + + /*! 
+ @brief deserialize from a compatible input + + @tparam InputType A compatible input, for instance + - an std::istream object + - a FILE pointer + - a C-style array of characters + - a pointer to a null-terminated string of single byte characters + - an object obj for which begin(obj) and end(obj) produces a valid pair of + iterators. + + @param[in] i input to read from + @param[in] cb a parser callback function of type @ref parser_callback_t + which is used to control the deserialization by filtering unwanted values + (optional) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + @param[in] ignore_comments whether comments should be ignored and treated + like whitespace (true) or yield a parse error (true); (optional, false by + default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. + + @throw parse_error.101 if a parse error occurs; example: `""unexpected end + of input; expected string literal""` + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. The complexity can be higher if the parser callback function + @a cb or reading from the input @a i has a super-linear complexity. + + @note A UTF-8 byte order mark is silently ignored. 
+ + @liveexample{The example below demonstrates the `parse()` function reading + from an array.,parse__array__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function with + and without callback function.,parse__string__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function with + and without callback function.,parse__istream__parser_callback_t} + + @liveexample{The example below demonstrates the `parse()` function reading + from a contiguous container.,parse__contiguouscontainer__parser_callback_t} + + @since version 2.0.3 (contiguous containers); version 3.9.0 allowed to + ignore comments. + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json parse(InputType&& i, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true, + const bool ignore_comments = false) + { + basic_json result; + parser(detail::input_adapter(std::forward(i)), cb, allow_exceptions, ignore_comments).parse(true, result); + return result; + } + + /*! + @brief deserialize from a pair of character iterators + + The value_type of the iterator must be a integral type with size of 1, 2 or + 4 bytes, which will be interpreted respectively as UTF-8, UTF-16 and UTF-32. + + @param[in] first iterator to start of character range + @param[in] last iterator to end of character range + @param[in] cb a parser callback function of type @ref parser_callback_t + which is used to control the deserialization by filtering unwanted values + (optional) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + @param[in] ignore_comments whether comments should be ignored and treated + like whitespace (true) or yield a parse error (true); (optional, false by + default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. 
+ + @throw parse_error.101 if a parse error occurs; example: `""unexpected end + of input; expected string literal""` + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json parse(IteratorType first, + IteratorType last, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true, + const bool ignore_comments = false) + { + basic_json result; + parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result); + return result; + } + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len)) + static basic_json parse(detail::span_input_adapter&& i, + const parser_callback_t cb = nullptr, + const bool allow_exceptions = true, + const bool ignore_comments = false) + { + basic_json result; + parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result); + return result; + } + + /*! + @brief check if the input is valid JSON + + Unlike the @ref parse(InputType&&, const parser_callback_t,const bool) + function, this function neither throws an exception in case of invalid JSON + input (i.e., a parse error) nor creates diagnostic information. + + @tparam InputType A compatible input, for instance + - an std::istream object + - a FILE pointer + - a C-style array of characters + - a pointer to a null-terminated string of single byte characters + - an object obj for which begin(obj) and end(obj) produces a valid pair of + iterators. + + @param[in] i input to read from + @param[in] ignore_comments whether comments should be ignored and treated + like whitespace (true) or yield a parse error (true); (optional, false by + default) + + @return Whether the input read from @a i is valid JSON. + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. + + @note A UTF-8 byte order mark is silently ignored. 
+ + @liveexample{The example below demonstrates the `accept()` function reading + from a string.,accept__string} + */ + template + static bool accept(InputType&& i, + const bool ignore_comments = false) + { + return parser(detail::input_adapter(std::forward(i)), nullptr, false, ignore_comments).accept(true); + } + + template + static bool accept(IteratorType first, IteratorType last, + const bool ignore_comments = false) + { + return parser(detail::input_adapter(std::move(first), std::move(last)), nullptr, false, ignore_comments).accept(true); + } + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, accept(ptr, ptr + len)) + static bool accept(detail::span_input_adapter&& i, + const bool ignore_comments = false) + { + return parser(i.get(), nullptr, false, ignore_comments).accept(true); + } + + /*! + @brief generate SAX events + + The SAX event lister must follow the interface of @ref json_sax. + + This function reads from a compatible input. Examples are: + - an std::istream object + - a FILE pointer + - a C-style array of characters + - a pointer to a null-terminated string of single byte characters + - an object obj for which begin(obj) and end(obj) produces a valid pair of + iterators. + + @param[in] i input to read from + @param[in,out] sax SAX event listener + @param[in] format the format to parse (JSON, CBOR, MessagePack, or UBJSON) + @param[in] strict whether the input has to be consumed completely + @param[in] ignore_comments whether comments should be ignored and treated + like whitespace (true) or yield a parse error (true); (optional, false by + default); only applies to the JSON file format. + + @return return value of the last processed SAX event + + @throw parse_error.101 if a parse error occurs; example: `""unexpected end + of input; expected string literal""` + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. 
The parser is a predictive + LL(1) parser. The complexity can be higher if the SAX consumer @a sax has + a super-linear complexity. + + @note A UTF-8 byte order mark is silently ignored. + + @liveexample{The example below demonstrates the `sax_parse()` function + reading from string and processing the events with a user-defined SAX + event consumer.,sax_parse} + + @since version 3.2.0 + */ + template + JSON_HEDLEY_NON_NULL(2) + static bool sax_parse(InputType&& i, SAX* sax, + input_format_t format = input_format_t::json, + const bool strict = true, + const bool ignore_comments = false) + { + auto ia = detail::input_adapter(std::forward(i)); + return format == input_format_t::json + ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) + : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); + } + + template + JSON_HEDLEY_NON_NULL(3) + static bool sax_parse(IteratorType first, IteratorType last, SAX* sax, + input_format_t format = input_format_t::json, + const bool strict = true, + const bool ignore_comments = false) + { + auto ia = detail::input_adapter(std::move(first), std::move(last)); + return format == input_format_t::json + ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) + : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); + } + + template + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, sax_parse(ptr, ptr + len, ...)) + JSON_HEDLEY_NON_NULL(2) + static bool sax_parse(detail::span_input_adapter&& i, SAX* sax, + input_format_t format = input_format_t::json, + const bool strict = true, + const bool ignore_comments = false) + { + auto ia = i.get(); + return format == input_format_t::json + ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) + : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); + } + + /*! + @brief deserialize from stream + @deprecated This stream operator is deprecated and will be removed in + version 4.0.0 of the library. 
Please use + @ref operator>>(std::istream&, basic_json&) + instead; that is, replace calls like `j << i;` with `i >> j;`. + @since version 1.0.0; deprecated since version 3.0.0 + */ + JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator>>(std::istream&, basic_json&)) + friend std::istream& operator<<(basic_json& j, std::istream& i) + { + return operator>>(i, j); + } + + /*! + @brief deserialize from stream + + Deserializes an input stream to a JSON value. + + @param[in,out] i input stream to read a serialized JSON value from + @param[in,out] j JSON value to write the deserialized input to + + @throw parse_error.101 in case of an unexpected token + @throw parse_error.102 if to_unicode fails or surrogate error + @throw parse_error.103 if to_unicode fails + + @complexity Linear in the length of the input. The parser is a predictive + LL(1) parser. + + @note A UTF-8 byte order mark is silently ignored. + + @liveexample{The example below shows how a JSON value is constructed by + reading a serialization from a stream.,operator_deserialize} + + @sa parse(std::istream&, const parser_callback_t) for a variant with a + parser callback function to filter values while parsing + + @since version 1.0.0 + */ + friend std::istream& operator>>(std::istream& i, basic_json& j) + { + parser(detail::input_adapter(i)).parse(false, j); + return i; + } + + /// @} + + /////////////////////////// + // convenience functions // + /////////////////////////// + + /*! + @brief return the type as string + + Returns the type name as string to be used in error messages - usually to + indicate that a function was called on a wrong JSON type. 
+ + @return a string representation of a the @a m_type member: + Value type | return value + ----------- | ------------- + null | `"null"` + boolean | `"boolean"` + string | `"string"` + number | `"number"` (for all number types) + object | `"object"` + array | `"array"` + binary | `"binary"` + discarded | `"discarded"` + + @exceptionsafety No-throw guarantee: this function never throws exceptions. + + @complexity Constant. + + @liveexample{The following code exemplifies `type_name()` for all JSON + types.,type_name} + + @sa @ref type() -- return the type of the JSON value + @sa @ref operator value_t() -- return the type of the JSON value (implicit) + + @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept` + since 3.0.0 + */ + JSON_HEDLEY_RETURNS_NON_NULL + const char* type_name() const noexcept + { + { + switch (m_type) + { + case value_t::null: + return "null"; + case value_t::object: + return "object"; + case value_t::array: + return "array"; + case value_t::string: + return "string"; + case value_t::boolean: + return "boolean"; + case value_t::binary: + return "binary"; + case value_t::discarded: + return "discarded"; + default: + return "number"; + } + } + } + + + JSON_PRIVATE_UNLESS_TESTED: + ////////////////////// + // member variables // + ////////////////////// + + /// the type of the current element + value_t m_type = value_t::null; + + /// the value of the current element + json_value m_value = {}; + +#if JSON_DIAGNOSTICS + /// a pointer to a parent value (for debugging purposes) + basic_json* m_parent = nullptr; +#endif + + ////////////////////////////////////////// + // binary serialization/deserialization // + ////////////////////////////////////////// + + /// @name binary serialization/deserialization support + /// @{ + + public: + /*! + @brief create a CBOR serialization of a given JSON value + + Serializes a given JSON value @a j to a byte vector using the CBOR (Concise + Binary Object Representation) serialization format. 
CBOR is a binary + serialization format which aims to be more compact than JSON itself, yet + more efficient to parse. + + The library uses the following mapping from JSON values types to + CBOR types according to the CBOR specification (RFC 7049): + + JSON value type | value/range | CBOR type | first byte + --------------- | ------------------------------------------ | ---------------------------------- | --------------- + null | `null` | Null | 0xF6 + boolean | `true` | True | 0xF5 + boolean | `false` | False | 0xF4 + number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3B + number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3A + number_integer | -32768..-129 | Negative integer (2 bytes follow) | 0x39 + number_integer | -128..-25 | Negative integer (1 byte follow) | 0x38 + number_integer | -24..-1 | Negative integer | 0x20..0x37 + number_integer | 0..23 | Integer | 0x00..0x17 + number_integer | 24..255 | Unsigned integer (1 byte follow) | 0x18 + number_integer | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 + number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B + number_unsigned | 0..23 | Integer | 0x00..0x17 + number_unsigned | 24..255 | Unsigned integer (1 byte follow) | 0x18 + number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 + number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A + number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B + number_float | *any value representable by a float* | Single-Precision Float | 0xFA + number_float | *any value NOT representable by a float* | Double-Precision Float | 0xFB + string | *length*: 0..23 | UTF-8 string | 0x60..0x77 + string | *length*: 23..255 | UTF-8 string (1 byte follow) | 0x78 + string | *length*: 256..65535 | UTF-8 string (2 bytes 
follow) | 0x79 + string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7A + string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7B + array | *size*: 0..23 | array | 0x80..0x97 + array | *size*: 23..255 | array (1 byte follow) | 0x98 + array | *size*: 256..65535 | array (2 bytes follow) | 0x99 + array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9A + array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9B + object | *size*: 0..23 | map | 0xA0..0xB7 + object | *size*: 23..255 | map (1 byte follow) | 0xB8 + object | *size*: 256..65535 | map (2 bytes follow) | 0xB9 + object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA + object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB + binary | *size*: 0..23 | byte string | 0x40..0x57 + binary | *size*: 23..255 | byte string (1 byte follow) | 0x58 + binary | *size*: 256..65535 | byte string (2 bytes follow) | 0x59 + binary | *size*: 65536..4294967295 | byte string (4 bytes follow) | 0x5A + binary | *size*: 4294967296..18446744073709551615 | byte string (8 bytes follow) | 0x5B + + @note The mapping is **complete** in the sense that any JSON value type + can be converted to a CBOR value. + + @note If NaN or Infinity are stored inside a JSON number, they are + serialized properly. This behavior differs from the @ref dump() + function which serializes NaN or Infinity to `null`. 
+ + @note The following CBOR types are not used in the conversion: + - UTF-8 strings terminated by "break" (0x7F) + - arrays terminated by "break" (0x9F) + - maps terminated by "break" (0xBF) + - byte strings terminated by "break" (0x5F) + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) + - half-precision floats (0xF9) + - break (0xFF) + + @param[in] j JSON value to serialize + @return CBOR serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. + + @liveexample{The example shows the serialization of a JSON value to a byte + vector in CBOR format.,to_cbor} + + @sa http://cbor.io + @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + analogous deserialization + @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + related UBJSON format + + @since version 2.0.9; compact representation of floating-point numbers + since version 3.8.0 + */ + static std::vector to_cbor(const basic_json& j) + { + std::vector result; + to_cbor(j, result); + return result; + } + + static void to_cbor(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_cbor(j); + } + + static void to_cbor(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_cbor(j); + } + + /*! + @brief create a MessagePack serialization of a given JSON value + + Serializes a given JSON value @a j to a byte vector using the MessagePack + serialization format. MessagePack is a binary serialization format which + aims to be more compact than JSON itself, yet more efficient to parse. 
+ + The library uses the following mapping from JSON values types to + MessagePack types according to the MessagePack specification: + + JSON value type | value/range | MessagePack type | first byte + --------------- | --------------------------------- | ---------------- | ---------- + null | `null` | nil | 0xC0 + boolean | `true` | true | 0xC3 + boolean | `false` | false | 0xC2 + number_integer | -9223372036854775808..-2147483649 | int64 | 0xD3 + number_integer | -2147483648..-32769 | int32 | 0xD2 + number_integer | -32768..-129 | int16 | 0xD1 + number_integer | -128..-33 | int8 | 0xD0 + number_integer | -32..-1 | negative fixint | 0xE0..0xFF + number_integer | 0..127 | positive fixint | 0x00..0x7F + number_integer | 128..255 | uint 8 | 0xCC + number_integer | 256..65535 | uint 16 | 0xCD + number_integer | 65536..4294967295 | uint 32 | 0xCE + number_integer | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_unsigned | 0..127 | positive fixint | 0x00..0x7F + number_unsigned | 128..255 | uint 8 | 0xCC + number_unsigned | 256..65535 | uint 16 | 0xCD + number_unsigned | 65536..4294967295 | uint 32 | 0xCE + number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF + number_float | *any value representable by a float* | float 32 | 0xCA + number_float | *any value NOT representable by a float* | float 64 | 0xCB + string | *length*: 0..31 | fixstr | 0xA0..0xBF + string | *length*: 32..255 | str 8 | 0xD9 + string | *length*: 256..65535 | str 16 | 0xDA + string | *length*: 65536..4294967295 | str 32 | 0xDB + array | *size*: 0..15 | fixarray | 0x90..0x9F + array | *size*: 16..65535 | array 16 | 0xDC + array | *size*: 65536..4294967295 | array 32 | 0xDD + object | *size*: 0..15 | fix map | 0x80..0x8F + object | *size*: 16..65535 | map 16 | 0xDE + object | *size*: 65536..4294967295 | map 32 | 0xDF + binary | *size*: 0..255 | bin 8 | 0xC4 + binary | *size*: 256..65535 | bin 16 | 0xC5 + binary | *size*: 65536..4294967295 | bin 32 | 0xC6 + + @note The mapping 
is **complete** in the sense that any JSON value type + can be converted to a MessagePack value. + + @note The following values can **not** be converted to a MessagePack value: + - strings with more than 4294967295 bytes + - byte strings with more than 4294967295 bytes + - arrays with more than 4294967295 elements + - objects with more than 4294967295 elements + + @note Any MessagePack output created @ref to_msgpack can be successfully + parsed by @ref from_msgpack. + + @note If NaN or Infinity are stored inside a JSON number, they are + serialized properly. This behavior differs from the @ref dump() + function which serializes NaN or Infinity to `null`. + + @param[in] j JSON value to serialize + @return MessagePack serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. + + @liveexample{The example shows the serialization of a JSON value to a byte + vector in MessagePack format.,to_msgpack} + + @sa http://msgpack.org + @sa @ref from_msgpack for the analogous deserialization + @sa @ref to_cbor(const basic_json& for the related CBOR format + @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + related UBJSON format + + @since version 2.0.9 + */ + static std::vector to_msgpack(const basic_json& j) + { + std::vector result; + to_msgpack(j, result); + return result; + } + + static void to_msgpack(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_msgpack(j); + } + + static void to_msgpack(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_msgpack(j); + } + + /*! + @brief create a UBJSON serialization of a given JSON value + + Serializes a given JSON value @a j to a byte vector using the UBJSON + (Universal Binary JSON) serialization format. UBJSON aims to be more compact + than JSON itself, yet more efficient to parse. 
+ + The library uses the following mapping from JSON values types to + UBJSON types according to the UBJSON specification: + + JSON value type | value/range | UBJSON type | marker + --------------- | --------------------------------- | ----------- | ------ + null | `null` | null | `Z` + boolean | `true` | true | `T` + boolean | `false` | false | `F` + number_integer | -9223372036854775808..-2147483649 | int64 | `L` + number_integer | -2147483648..-32769 | int32 | `l` + number_integer | -32768..-129 | int16 | `I` + number_integer | -128..127 | int8 | `i` + number_integer | 128..255 | uint8 | `U` + number_integer | 256..32767 | int16 | `I` + number_integer | 32768..2147483647 | int32 | `l` + number_integer | 2147483648..9223372036854775807 | int64 | `L` + number_unsigned | 0..127 | int8 | `i` + number_unsigned | 128..255 | uint8 | `U` + number_unsigned | 256..32767 | int16 | `I` + number_unsigned | 32768..2147483647 | int32 | `l` + number_unsigned | 2147483648..9223372036854775807 | int64 | `L` + number_unsigned | 2147483649..18446744073709551615 | high-precision | `H` + number_float | *any value* | float64 | `D` + string | *with shortest length indicator* | string | `S` + array | *see notes on optimized format* | array | `[` + object | *see notes on optimized format* | map | `{` + + @note The mapping is **complete** in the sense that any JSON value type + can be converted to a UBJSON value. + + @note The following values can **not** be converted to a UBJSON value: + - strings with more than 9223372036854775807 bytes (theoretical) + + @note The following markers are not used in the conversion: + - `Z`: no-op values are not created. + - `C`: single-byte strings are serialized with `S` markers. + + @note Any UBJSON output created @ref to_ubjson can be successfully parsed + by @ref from_ubjson. + + @note If NaN or Infinity are stored inside a JSON number, they are + serialized properly. 
This behavior differs from the @ref dump() + function which serializes NaN or Infinity to `null`. + + @note The optimized formats for containers are supported: Parameter + @a use_size adds size information to the beginning of a container and + removes the closing marker. Parameter @a use_type further checks + whether all elements of a container have the same type and adds the + type marker to the beginning of the container. The @a use_type + parameter must only be used together with @a use_size = true. Note + that @a use_size = true alone may result in larger representations - + the benefit of this parameter is that the receiving side is + immediately informed on the number of elements of the container. + + @note If the JSON data contains the binary type, the value stored is a list + of integers, as suggested by the UBJSON documentation. In particular, + this means that serialization and the deserialization of a JSON + containing binary values into UBJSON and back will result in a + different JSON object. + + @param[in] j JSON value to serialize + @param[in] use_size whether to add size annotations to container types + @param[in] use_type whether to add type annotations to container types + (must be combined with @a use_size = true) + @return UBJSON serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. 
+ + @liveexample{The example shows the serialization of a JSON value to a byte + vector in UBJSON format.,to_ubjson} + + @sa http://ubjson.org + @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + analogous deserialization + @sa @ref to_cbor(const basic_json& for the related CBOR format + @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + + @since version 3.1.0 + */ + static std::vector to_ubjson(const basic_json& j, + const bool use_size = false, + const bool use_type = false) + { + std::vector result; + to_ubjson(j, result, use_size, use_type); + return result; + } + + static void to_ubjson(const basic_json& j, detail::output_adapter o, + const bool use_size = false, const bool use_type = false) + { + binary_writer(o).write_ubjson(j, use_size, use_type); + } + + static void to_ubjson(const basic_json& j, detail::output_adapter o, + const bool use_size = false, const bool use_type = false) + { + binary_writer(o).write_ubjson(j, use_size, use_type); + } + + + /*! + @brief Serializes the given JSON object `j` to BSON and returns a vector + containing the corresponding BSON-representation. + + BSON (Binary JSON) is a binary format in which zero or more ordered key/value pairs are + stored as a single entity (a so-called document). 
+ + The library uses the following mapping from JSON values types to BSON types: + + JSON value type | value/range | BSON type | marker + --------------- | --------------------------------- | ----------- | ------ + null | `null` | null | 0x0A + boolean | `true`, `false` | boolean | 0x08 + number_integer | -9223372036854775808..-2147483649 | int64 | 0x12 + number_integer | -2147483648..2147483647 | int32 | 0x10 + number_integer | 2147483648..9223372036854775807 | int64 | 0x12 + number_unsigned | 0..2147483647 | int32 | 0x10 + number_unsigned | 2147483648..9223372036854775807 | int64 | 0x12 + number_unsigned | 9223372036854775808..18446744073709551615| -- | -- + number_float | *any value* | double | 0x01 + string | *any value* | string | 0x02 + array | *any value* | document | 0x04 + object | *any value* | document | 0x03 + binary | *any value* | binary | 0x05 + + @warning The mapping is **incomplete**, since only JSON-objects (and things + contained therein) can be serialized to BSON. + Also, integers larger than 9223372036854775807 cannot be serialized to BSON, + and the keys may not contain U+0000, since they are serialized a + zero-terminated c-strings. + + @throw out_of_range.407 if `j.is_number_unsigned() && j.get() > 9223372036854775807` + @throw out_of_range.409 if a key in `j` contains a NULL (U+0000) + @throw type_error.317 if `!j.is_object()` + + @pre The input `j` is required to be an object: `j.is_object() == true`. + + @note Any BSON output created via @ref to_bson can be successfully parsed + by @ref from_bson. + + @param[in] j JSON value to serialize + @return BSON serialization as byte vector + + @complexity Linear in the size of the JSON value @a j. 
+ + @liveexample{The example shows the serialization of a JSON value to a byte + vector in BSON format.,to_bson} + + @sa http://bsonspec.org/spec.html + @sa @ref from_bson(detail::input_adapter&&, const bool strict) for the + analogous deserialization + @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + related UBJSON format + @sa @ref to_cbor(const basic_json&) for the related CBOR format + @sa @ref to_msgpack(const basic_json&) for the related MessagePack format + */ + static std::vector to_bson(const basic_json& j) + { + std::vector result; + to_bson(j, result); + return result; + } + + /*! + @brief Serializes the given JSON object `j` to BSON and forwards the + corresponding BSON-representation to the given output_adapter `o`. + @param j The JSON object to convert to BSON. + @param o The output adapter that receives the binary BSON representation. + @pre The input `j` shall be an object: `j.is_object() == true` + @sa @ref to_bson(const basic_json&) + */ + static void to_bson(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_bson(j); + } + + /*! + @copydoc to_bson(const basic_json&, detail::output_adapter) + */ + static void to_bson(const basic_json& j, detail::output_adapter o) + { + binary_writer(o).write_bson(j); + } + + + /*! + @brief create a JSON value from an input in CBOR format + + Deserializes a given input @a i to a JSON value using the CBOR (Concise + Binary Object Representation) serialization format. 
+ + The library maps CBOR types to JSON value types as follows: + + CBOR type | JSON value type | first byte + ---------------------- | --------------- | ---------- + Integer | number_unsigned | 0x00..0x17 + Unsigned integer | number_unsigned | 0x18 + Unsigned integer | number_unsigned | 0x19 + Unsigned integer | number_unsigned | 0x1A + Unsigned integer | number_unsigned | 0x1B + Negative integer | number_integer | 0x20..0x37 + Negative integer | number_integer | 0x38 + Negative integer | number_integer | 0x39 + Negative integer | number_integer | 0x3A + Negative integer | number_integer | 0x3B + Byte string | binary | 0x40..0x57 + Byte string | binary | 0x58 + Byte string | binary | 0x59 + Byte string | binary | 0x5A + Byte string | binary | 0x5B + UTF-8 string | string | 0x60..0x77 + UTF-8 string | string | 0x78 + UTF-8 string | string | 0x79 + UTF-8 string | string | 0x7A + UTF-8 string | string | 0x7B + UTF-8 string | string | 0x7F + array | array | 0x80..0x97 + array | array | 0x98 + array | array | 0x99 + array | array | 0x9A + array | array | 0x9B + array | array | 0x9F + map | object | 0xA0..0xB7 + map | object | 0xB8 + map | object | 0xB9 + map | object | 0xBA + map | object | 0xBB + map | object | 0xBF + False | `false` | 0xF4 + True | `true` | 0xF5 + Null | `null` | 0xF6 + Half-Precision Float | number_float | 0xF9 + Single-Precision Float | number_float | 0xFA + Double-Precision Float | number_float | 0xFB + + @warning The mapping is **incomplete** in the sense that not all CBOR + types can be converted to a JSON value. The following CBOR types + are not supported and will yield parse errors (parse_error.112): + - date/time (0xC0..0xC1) + - bignum (0xC2..0xC3) + - decimal fraction (0xC4) + - bigfloat (0xC5) + - expected conversions (0xD5..0xD7) + - simple values (0xE0..0xF3, 0xF8) + - undefined (0xF7) + + @warning CBOR allows map keys of any type, whereas JSON only allows + strings as keys in object values. 
Therefore, CBOR maps with keys + other than UTF-8 strings are rejected (parse_error.113). + + @note Any CBOR output created @ref to_cbor can be successfully parsed by + @ref from_cbor. + + @param[in] i an input in CBOR format convertible to an input adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + @param[in] tag_handler how to treat CBOR tags (optional, error by default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. + + @throw parse_error.110 if the given input ends prematurely or the end of + file was not reached when @a strict was set to true + @throw parse_error.112 if unsupported features from CBOR were + used in the given input @a v or if the input is not valid CBOR + @throw parse_error.113 if a string was expected as map key, but not found + + @complexity Linear in the size of the input @a i. + + @liveexample{The example shows the deserialization of a byte vector in CBOR + format to a JSON value.,from_cbor} + + @sa http://cbor.io + @sa @ref to_cbor(const basic_json&) for the analogous serialization + @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the + related MessagePack format + @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + related UBJSON format + + @since version 2.0.9; parameter @a start_index since 2.1.1; changed to + consume input adapters, removed start_index parameter, and added + @a strict parameter since 3.0.0; added @a allow_exceptions parameter + since 3.2.0; added @a tag_handler parameter since 3.9.0. 
+ */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_cbor(InputType&& i, + const bool strict = true, + const bool allow_exceptions = true, + const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::forward(i)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + return res ? result : basic_json(value_t::discarded); + } + + /*! + @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_cbor(IteratorType first, IteratorType last, + const bool strict = true, + const bool allow_exceptions = true, + const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::move(first), std::move(last)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + return res ? 
result : basic_json(value_t::discarded); + } + + template + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len)) + static basic_json from_cbor(const T* ptr, std::size_t len, + const bool strict = true, + const bool allow_exceptions = true, + const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) + { + return from_cbor(ptr, ptr + len, strict, allow_exceptions, tag_handler); + } + + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len)) + static basic_json from_cbor(detail::span_input_adapter&& i, + const bool strict = true, + const bool allow_exceptions = true, + const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = i.get(); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + return res ? result : basic_json(value_t::discarded); + } + + /*! + @brief create a JSON value from an input in MessagePack format + + Deserializes a given input @a i to a JSON value using the MessagePack + serialization format. 
+ + The library maps MessagePack types to JSON value types as follows: + + MessagePack type | JSON value type | first byte + ---------------- | --------------- | ---------- + positive fixint | number_unsigned | 0x00..0x7F + fixmap | object | 0x80..0x8F + fixarray | array | 0x90..0x9F + fixstr | string | 0xA0..0xBF + nil | `null` | 0xC0 + false | `false` | 0xC2 + true | `true` | 0xC3 + float 32 | number_float | 0xCA + float 64 | number_float | 0xCB + uint 8 | number_unsigned | 0xCC + uint 16 | number_unsigned | 0xCD + uint 32 | number_unsigned | 0xCE + uint 64 | number_unsigned | 0xCF + int 8 | number_integer | 0xD0 + int 16 | number_integer | 0xD1 + int 32 | number_integer | 0xD2 + int 64 | number_integer | 0xD3 + str 8 | string | 0xD9 + str 16 | string | 0xDA + str 32 | string | 0xDB + array 16 | array | 0xDC + array 32 | array | 0xDD + map 16 | object | 0xDE + map 32 | object | 0xDF + bin 8 | binary | 0xC4 + bin 16 | binary | 0xC5 + bin 32 | binary | 0xC6 + ext 8 | binary | 0xC7 + ext 16 | binary | 0xC8 + ext 32 | binary | 0xC9 + fixext 1 | binary | 0xD4 + fixext 2 | binary | 0xD5 + fixext 4 | binary | 0xD6 + fixext 8 | binary | 0xD7 + fixext 16 | binary | 0xD8 + negative fixint | number_integer | 0xE0-0xFF + + @note Any MessagePack output created @ref to_msgpack can be successfully + parsed by @ref from_msgpack. + + @param[in] i an input in MessagePack format convertible to an input + adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. 
+ + @throw parse_error.110 if the given input ends prematurely or the end of + file was not reached when @a strict was set to true + @throw parse_error.112 if unsupported features from MessagePack were + used in the given input @a i or if the input is not valid MessagePack + @throw parse_error.113 if a string was expected as map key, but not found + + @complexity Linear in the size of the input @a i. + + @liveexample{The example shows the deserialization of a byte vector in + MessagePack format to a JSON value.,from_msgpack} + + @sa http://msgpack.org + @sa @ref to_msgpack(const basic_json&) for the analogous serialization + @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + related CBOR format + @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for + the related UBJSON format + @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for + the related BSON format + + @since version 2.0.9; parameter @a start_index since 2.1.1; changed to + consume input adapters, removed start_index parameter, and added + @a strict parameter since 3.0.0; added @a allow_exceptions parameter + since 3.2.0 + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_msgpack(InputType&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::forward(i)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + /*! 
+ @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool) + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_msgpack(IteratorType first, IteratorType last, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::move(first), std::move(last)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + + template + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len)) + static basic_json from_msgpack(const T* ptr, std::size_t len, + const bool strict = true, + const bool allow_exceptions = true) + { + return from_msgpack(ptr, ptr + len, strict, allow_exceptions); + } + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len)) + static basic_json from_msgpack(detail::span_input_adapter&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = i.get(); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + + /*! + @brief create a JSON value from an input in UBJSON format + + Deserializes a given input @a i to a JSON value using the UBJSON (Universal + Binary JSON) serialization format. 
+ + The library maps UBJSON types to JSON value types as follows: + + UBJSON type | JSON value type | marker + ----------- | --------------------------------------- | ------ + no-op | *no value, next value is read* | `N` + null | `null` | `Z` + false | `false` | `F` + true | `true` | `T` + float32 | number_float | `d` + float64 | number_float | `D` + uint8 | number_unsigned | `U` + int8 | number_integer | `i` + int16 | number_integer | `I` + int32 | number_integer | `l` + int64 | number_integer | `L` + high-precision number | number_integer, number_unsigned, or number_float - depends on number string | 'H' + string | string | `S` + char | string | `C` + array | array (optimized values are supported) | `[` + object | object (optimized values are supported) | `{` + + @note The mapping is **complete** in the sense that any UBJSON value can + be converted to a JSON value. + + @param[in] i an input in UBJSON format convertible to an input adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. + + @throw parse_error.110 if the given input ends prematurely or the end of + file was not reached when @a strict was set to true + @throw parse_error.112 if a parse error occurs + @throw parse_error.113 if a string could not be parsed successfully + + @complexity Linear in the size of the input @a i. 
+ + @liveexample{The example shows the deserialization of a byte vector in + UBJSON format to a JSON value.,from_ubjson} + + @sa http://ubjson.org + @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the + analogous serialization + @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + related CBOR format + @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for + the related MessagePack format + @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for + the related BSON format + + @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0 + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_ubjson(InputType&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::forward(i)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + /*! + @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool) + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_ubjson(IteratorType first, IteratorType last, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::move(first), std::move(last)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); + return res ? 
result : basic_json(value_t::discarded); + } + + template + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len)) + static basic_json from_ubjson(const T* ptr, std::size_t len, + const bool strict = true, + const bool allow_exceptions = true) + { + return from_ubjson(ptr, ptr + len, strict, allow_exceptions); + } + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len)) + static basic_json from_ubjson(detail::span_input_adapter&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = i.get(); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + + /*! + @brief Create a JSON value from an input in BSON format + + Deserializes a given input @a i to a JSON value using the BSON (Binary JSON) + serialization format. + + The library maps BSON record types to JSON value types as follows: + + BSON type | BSON marker byte | JSON value type + --------------- | ---------------- | --------------------------- + double | 0x01 | number_float + string | 0x02 | string + document | 0x03 | object + array | 0x04 | array + binary | 0x05 | binary + undefined | 0x06 | still unsupported + ObjectId | 0x07 | still unsupported + boolean | 0x08 | boolean + UTC Date-Time | 0x09 | still unsupported + null | 0x0A | null + Regular Expr. | 0x0B | still unsupported + DB Pointer | 0x0C | still unsupported + JavaScript Code | 0x0D | still unsupported + Symbol | 0x0E | still unsupported + JavaScript Code | 0x0F | still unsupported + int32 | 0x10 | number_integer + Timestamp | 0x11 | still unsupported + 128-bit decimal float | 0x13 | still unsupported + Max Key | 0x7F | still unsupported + Min Key | 0xFF | still unsupported + + @warning The mapping is **incomplete**. 
The unsupported mappings + are indicated in the table above. + + @param[in] i an input in BSON format convertible to an input adapter + @param[in] strict whether to expect the input to be consumed until EOF + (true by default) + @param[in] allow_exceptions whether to throw exceptions in case of a + parse error (optional, true by default) + + @return deserialized JSON value; in case of a parse error and + @a allow_exceptions set to `false`, the return value will be + value_t::discarded. + + @throw parse_error.114 if an unsupported BSON record type is encountered + + @complexity Linear in the size of the input @a i. + + @liveexample{The example shows the deserialization of a byte vector in + BSON format to a JSON value.,from_bson} + + @sa http://bsonspec.org/spec.html + @sa @ref to_bson(const basic_json&) for the analogous serialization + @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the + related CBOR format + @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for + the related MessagePack format + @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the + related UBJSON format + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_bson(InputType&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::forward(i)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + /*! 
+ @copydoc from_bson(detail::input_adapter&&, const bool, const bool) + */ + template + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json from_bson(IteratorType first, IteratorType last, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = detail::input_adapter(std::move(first), std::move(last)); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + + template + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len)) + static basic_json from_bson(const T* ptr, std::size_t len, + const bool strict = true, + const bool allow_exceptions = true) + { + return from_bson(ptr, ptr + len, strict, allow_exceptions); + } + + JSON_HEDLEY_WARN_UNUSED_RESULT + JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len)) + static basic_json from_bson(detail::span_input_adapter&& i, + const bool strict = true, + const bool allow_exceptions = true) + { + basic_json result; + detail::json_sax_dom_parser sdp(result, allow_exceptions); + auto ia = i.get(); + const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); + return res ? result : basic_json(value_t::discarded); + } + /// @} + + ////////////////////////// + // JSON Pointer support // + ////////////////////////// + + /// @name JSON Pointer functions + /// @{ + + /*! + @brief access specified element via JSON Pointer + + Uses a JSON pointer to retrieve a reference to the respective JSON value. + No bound checking is performed. Similar to @ref operator[](const typename + object_t::key_type&), `null` values are created in arrays and objects if + necessary. + + In particular: + - If the JSON pointer points to an object key that does not exist, it + is created an filled with a `null` value before a reference to it + is returned. 
+ - If the JSON pointer points to an array index that does not exist, it + is created an filled with a `null` value before a reference to it + is returned. All indices between the current maximum and the given + index are also filled with `null`. + - The special value `-` is treated as a synonym for the index past the + end. + + @param[in] ptr a JSON pointer + + @return reference to the element pointed to by @a ptr + + @complexity Constant. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.404 if the JSON pointer can not be resolved + + @liveexample{The behavior is shown in the example.,operatorjson_pointer} + + @since version 2.0.0 + */ + reference operator[](const json_pointer& ptr) + { + return ptr.get_unchecked(this); + } + + /*! + @brief access specified element via JSON Pointer + + Uses a JSON pointer to retrieve a reference to the respective JSON value. + No bound checking is performed. The function does not change the JSON + value; no `null` values are created. In particular, the special value + `-` yields an exception. + + @param[in] ptr JSON pointer to the desired element + + @return const reference to the element pointed to by @a ptr + + @complexity Constant. + + @throw parse_error.106 if an array index begins with '0' + @throw parse_error.109 if an array index was not a number + @throw out_of_range.402 if the array index '-' is used + @throw out_of_range.404 if the JSON pointer can not be resolved + + @liveexample{The behavior is shown in the example.,operatorjson_pointer_const} + + @since version 2.0.0 + */ + const_reference operator[](const json_pointer& ptr) const + { + return ptr.get_unchecked(this); + } + + /*! + @brief access specified element via JSON Pointer + + Returns a reference to the element at with specified JSON pointer @a ptr, + with bounds checking. 
+ + @param[in] ptr JSON pointer to the desired element + + @return reference to the element pointed to by @a ptr + + @throw parse_error.106 if an array index in the passed JSON pointer @a ptr + begins with '0'. See example below. + + @throw parse_error.109 if an array index in the passed JSON pointer @a ptr + is not a number. See example below. + + @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr + is out of range. See example below. + + @throw out_of_range.402 if the array index '-' is used in the passed JSON + pointer @a ptr. As `at` provides checked access (and no elements are + implicitly inserted), the index '-' is always invalid. See example below. + + @throw out_of_range.403 if the JSON pointer describes a key of an object + which cannot be found. See example below. + + @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. + See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 2.0.0 + + @liveexample{The behavior is shown in the example.,at_json_pointer} + */ + reference at(const json_pointer& ptr) + { + return ptr.get_checked(this); + } + + /*! + @brief access specified element via JSON Pointer + + Returns a const reference to the element at with specified JSON pointer @a + ptr, with bounds checking. + + @param[in] ptr JSON pointer to the desired element + + @return reference to the element pointed to by @a ptr + + @throw parse_error.106 if an array index in the passed JSON pointer @a ptr + begins with '0'. See example below. + + @throw parse_error.109 if an array index in the passed JSON pointer @a ptr + is not a number. See example below. + + @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr + is out of range. See example below. + + @throw out_of_range.402 if the array index '-' is used in the passed JSON + pointer @a ptr. 
As `at` provides checked access (and no elements are + implicitly inserted), the index '-' is always invalid. See example below. + + @throw out_of_range.403 if the JSON pointer describes a key of an object + which cannot be found. See example below. + + @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. + See example below. + + @exceptionsafety Strong guarantee: if an exception is thrown, there are no + changes in the JSON value. + + @complexity Constant. + + @since version 2.0.0 + + @liveexample{The behavior is shown in the example.,at_json_pointer_const} + */ + const_reference at(const json_pointer& ptr) const + { + return ptr.get_checked(this); + } + + /*! + @brief return flattened JSON value + + The function creates a JSON object whose keys are JSON pointers (see [RFC + 6901](https://tools.ietf.org/html/rfc6901)) and whose values are all + primitive. The original JSON value can be restored using the @ref + unflatten() function. + + @return an object that maps JSON pointers to primitive values + + @note Empty objects and arrays are flattened to `null` and will not be + reconstructed correctly by the @ref unflatten() function. + + @complexity Linear in the size the JSON value. + + @liveexample{The following code shows how a JSON object is flattened to an + object whose keys consist of JSON pointers.,flatten} + + @sa @ref unflatten() for the reverse function + + @since version 2.0.0 + */ + basic_json flatten() const + { + basic_json result(value_t::object); + json_pointer::flatten("", *this, result); + return result; + } + + /*! + @brief unflatten a previously flattened JSON value + + The function restores the arbitrary nesting of a JSON value that has been + flattened before using the @ref flatten() function. The JSON value must + meet certain constraints: + 1. The value must be an object. + 2. The keys must be JSON pointers (see + [RFC 6901](https://tools.ietf.org/html/rfc6901)) + 3. The mapped values must be primitive JSON types. 
+ + @return the original JSON from a flattened version + + @note Empty objects and arrays are flattened by @ref flatten() to `null` + values and can not unflattened to their original type. Apart from + this example, for a JSON value `j`, the following is always true: + `j == j.flatten().unflatten()`. + + @complexity Linear in the size the JSON value. + + @throw type_error.314 if value is not an object + @throw type_error.315 if object values are not primitive + + @liveexample{The following code shows how a flattened JSON object is + unflattened into the original nested JSON object.,unflatten} + + @sa @ref flatten() for the reverse function + + @since version 2.0.0 + */ + basic_json unflatten() const + { + return json_pointer::unflatten(*this); + } + + /// @} + + ////////////////////////// + // JSON Patch functions // + ////////////////////////// + + /// @name JSON Patch functions + /// @{ + + /*! + @brief applies a JSON patch + + [JSON Patch](http://jsonpatch.com) defines a JSON document structure for + expressing a sequence of operations to apply to a JSON) document. With + this function, a JSON Patch is applied to the current JSON value by + executing all operations from the patch. + + @param[in] json_patch JSON patch document + @return patched document + + @note The application of a patch is atomic: Either all operations succeed + and the patched document is returned or an exception is thrown. In + any case, the original value is not changed: the patch is applied + to a copy of the value. + + @throw parse_error.104 if the JSON patch does not consist of an array of + objects + + @throw parse_error.105 if the JSON patch is malformed (e.g., mandatory + attributes are missing); example: `"operation add must have member path"` + + @throw out_of_range.401 if an array index is out of range. 
+ + @throw out_of_range.403 if a JSON pointer inside the patch could not be + resolved successfully in the current JSON value; example: `"key baz not + found"` + + @throw out_of_range.405 if JSON pointer has no parent ("add", "remove", + "move") + + @throw other_error.501 if "test" operation was unsuccessful + + @complexity Linear in the size of the JSON value and the length of the + JSON patch. As usually only a fraction of the JSON value is affected by + the patch, the complexity can usually be neglected. + + @liveexample{The following code shows how a JSON patch is applied to a + value.,patch} + + @sa @ref diff -- create a JSON patch by comparing two JSON values + + @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) + @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901) + + @since version 2.0.0 + */ + basic_json patch(const basic_json& json_patch) const + { + // make a working copy to apply the patch to + basic_json result = *this; + + // the valid JSON Patch operations + enum class patch_operations {add, remove, replace, move, copy, test, invalid}; + + const auto get_op = [](const std::string & op) + { + if (op == "add") + { + return patch_operations::add; + } + if (op == "remove") + { + return patch_operations::remove; + } + if (op == "replace") + { + return patch_operations::replace; + } + if (op == "move") + { + return patch_operations::move; + } + if (op == "copy") + { + return patch_operations::copy; + } + if (op == "test") + { + return patch_operations::test; + } + + return patch_operations::invalid; + }; + + // wrapper for "add" operation; add value at ptr + const auto operation_add = [&result](json_pointer & ptr, basic_json val) + { + // adding to the root of the target document means replacing it + if (ptr.empty()) + { + result = val; + return; + } + + // make sure the top element of the pointer exists + json_pointer top_pointer = ptr.top(); + if (top_pointer != ptr) + { + result.at(top_pointer); + } + + // get reference to 
parent of JSON pointer ptr + const auto last_path = ptr.back(); + ptr.pop_back(); + basic_json& parent = result[ptr]; + + switch (parent.m_type) + { + case value_t::null: + case value_t::object: + { + // use operator[] to add value + parent[last_path] = val; + break; + } + + case value_t::array: + { + if (last_path == "-") + { + // special case: append to back + parent.push_back(val); + } + else + { + const auto idx = json_pointer::array_index(last_path); + if (JSON_HEDLEY_UNLIKELY(idx > parent.size())) + { + // avoid undefined behavior + JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", parent)); + } + + // default case: insert add offset + parent.insert(parent.begin() + static_cast(idx), val); + } + break; + } + + // if there exists a parent it cannot be primitive + default: // LCOV_EXCL_LINE + JSON_ASSERT(false); // LCOV_EXCL_LINE + } + }; + + // wrapper for "remove" operation; remove value at ptr + const auto operation_remove = [this, &result](json_pointer & ptr) + { + // get reference to parent of JSON pointer ptr + const auto last_path = ptr.back(); + ptr.pop_back(); + basic_json& parent = result.at(ptr); + + // remove child + if (parent.is_object()) + { + // perform range check + auto it = parent.find(last_path); + if (JSON_HEDLEY_LIKELY(it != parent.end())) + { + parent.erase(it); + } + else + { + JSON_THROW(out_of_range::create(403, "key '" + last_path + "' not found", *this)); + } + } + else if (parent.is_array()) + { + // note erase performs range check + parent.erase(json_pointer::array_index(last_path)); + } + }; + + // type check: top level value must be an array + if (JSON_HEDLEY_UNLIKELY(!json_patch.is_array())) + { + JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects", json_patch)); + } + + // iterate and apply the operations + for (const auto& val : json_patch) + { + // wrapper to get a value for an operation + const auto get_value = [&val](const std::string & op, + const 
std::string & member, + bool string_type) -> basic_json & + { + // find value + auto it = val.m_value.object->find(member); + + // context-sensitive error message + const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'"; + + // check if desired value is present + if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end())) + { + JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'", val)); + } + + // check if result is of type string + if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string())) + { + JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'", val)); + } + + // no error: return value + return it->second; + }; + + // type check: every element of the array must be an object + if (JSON_HEDLEY_UNLIKELY(!val.is_object())) + { + JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects", val)); + } + + // collect mandatory members + const auto op = get_value("op", "op", true).template get(); + const auto path = get_value(op, "path", true).template get(); + json_pointer ptr(path); + + switch (get_op(op)) + { + case patch_operations::add: + { + operation_add(ptr, get_value("add", "value", false)); + break; + } + + case patch_operations::remove: + { + operation_remove(ptr); + break; + } + + case patch_operations::replace: + { + // the "path" location must exist - use at() + result.at(ptr) = get_value("replace", "value", false); + break; + } + + case patch_operations::move: + { + const auto from_path = get_value("move", "from", true).template get(); + json_pointer from_ptr(from_path); + + // the "from" location must exist - use at() + basic_json v = result.at(from_ptr); + + // The move operation is functionally identical to a + // "remove" operation on the "from" location, followed + // immediately by an "add" operation at the target + // location with the value that was just removed. 
+ operation_remove(from_ptr); + operation_add(ptr, v); + break; + } + + case patch_operations::copy: + { + const auto from_path = get_value("copy", "from", true).template get(); + const json_pointer from_ptr(from_path); + + // the "from" location must exist - use at() + basic_json v = result.at(from_ptr); + + // The copy is functionally identical to an "add" + // operation at the target location using the value + // specified in the "from" member. + operation_add(ptr, v); + break; + } + + case patch_operations::test: + { + bool success = false; + JSON_TRY + { + // check if "value" matches the one at "path" + // the "path" location must exist - use at() + success = (result.at(ptr) == get_value("test", "value", false)); + } + JSON_INTERNAL_CATCH (out_of_range&) + { + // ignore out of range errors: success remains false + } + + // throw an exception if test fails + if (JSON_HEDLEY_UNLIKELY(!success)) + { + JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump(), val)); + } + + break; + } + + default: + { + // op must be "add", "remove", "replace", "move", "copy", or + // "test" + JSON_THROW(parse_error::create(105, 0, "operation value '" + op + "' is invalid", val)); + } + } + } + + return result; + } + + /*! + @brief creates a diff as a JSON patch + + Creates a [JSON Patch](http://jsonpatch.com) so that value @a source can + be changed into the value @a target by calling @ref patch function. + + @invariant For two JSON values @a source and @a target, the following code + yields always `true`: + @code {.cpp} + source.patch(diff(source, target)) == target; + @endcode + + @note Currently, only `remove`, `add`, and `replace` operations are + generated. + + @param[in] source JSON value to compare from + @param[in] target JSON value to compare against + @param[in] path helper value to create JSON pointers + + @return a JSON patch to convert the @a source to @a target + + @complexity Linear in the lengths of @a source and @a target. 
+ + @liveexample{The following code shows how a JSON patch is created as a + diff for two JSON values.,diff} + + @sa @ref patch -- apply a JSON patch + @sa @ref merge_patch -- apply a JSON Merge Patch + + @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) + + @since version 2.0.0 + */ + JSON_HEDLEY_WARN_UNUSED_RESULT + static basic_json diff(const basic_json& source, const basic_json& target, + const std::string& path = "") + { + // the patch + basic_json result(value_t::array); + + // if the values are the same, return empty patch + if (source == target) + { + return result; + } + + if (source.type() != target.type()) + { + // different types: replace value + result.push_back( + { + {"op", "replace"}, {"path", path}, {"value", target} + }); + return result; + } + + switch (source.type()) + { + case value_t::array: + { + // first pass: traverse common elements + std::size_t i = 0; + while (i < source.size() && i < target.size()) + { + // recursive call to compare array values at index i + auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i)); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + ++i; + } + + // i now reached the end of at least one array + // in a second pass, traverse the remaining elements + + // remove my remaining elements + const auto end_index = static_cast(result.size()); + while (i < source.size()) + { + // add operations in reverse order to avoid invalid + // indices + result.insert(result.begin() + end_index, object( + { + {"op", "remove"}, + {"path", path + "/" + std::to_string(i)} + })); + ++i; + } + + // add other remaining elements + while (i < target.size()) + { + result.push_back( + { + {"op", "add"}, + {"path", path + "/-"}, + {"value", target[i]} + }); + ++i; + } + + break; + } + + case value_t::object: + { + // first pass: traverse this object's elements + for (auto it = source.cbegin(); it != source.cend(); ++it) + { + // escape the key name to be used in a JSON patch + const 
auto key = detail::escape(it.key()); + + if (target.find(it.key()) != target.end()) + { + // recursive call to compare object values at key it + auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); + result.insert(result.end(), temp_diff.begin(), temp_diff.end()); + } + else + { + // found a key that is not in o -> remove it + result.push_back(object( + { + {"op", "remove"}, {"path", path + "/" + key} + })); + } + } + + // second pass: traverse other object's elements + for (auto it = target.cbegin(); it != target.cend(); ++it) + { + if (source.find(it.key()) == source.end()) + { + // found a key that is not in this -> add it + const auto key = detail::escape(it.key()); + result.push_back( + { + {"op", "add"}, {"path", path + "/" + key}, + {"value", it.value()} + }); + } + } + + break; + } + + default: + { + // both primitive type: replace value + result.push_back( + { + {"op", "replace"}, {"path", path}, {"value", target} + }); + break; + } + } + + return result; + } + + /// @} + + //////////////////////////////// + // JSON Merge Patch functions // + //////////////////////////////// + + /// @name JSON Merge Patch functions + /// @{ + + /*! + @brief applies a JSON Merge Patch + + The merge patch format is primarily intended for use with the HTTP PATCH + method as a means of describing a set of modifications to a target + resource's content. This function applies a merge patch to the current + JSON value. 
+ + The function implements the following algorithm from Section 2 of + [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396): + + ``` + define MergePatch(Target, Patch): + if Patch is an Object: + if Target is not an Object: + Target = {} // Ignore the contents and set it to an empty Object + for each Name/Value pair in Patch: + if Value is null: + if Name exists in Target: + remove the Name/Value pair from Target + else: + Target[Name] = MergePatch(Target[Name], Value) + return Target + else: + return Patch + ``` + + Thereby, `Target` is the current object; that is, the patch is applied to + the current value. + + @param[in] apply_patch the patch to apply + + @complexity Linear in the lengths of @a patch. + + @liveexample{The following code shows how a JSON Merge Patch is applied to + a JSON document.,merge_patch} + + @sa @ref patch -- apply a JSON patch + @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396) + + @since version 3.0.0 + */ + void merge_patch(const basic_json& apply_patch) + { + if (apply_patch.is_object()) + { + if (!is_object()) + { + *this = object(); + } + for (auto it = apply_patch.begin(); it != apply_patch.end(); ++it) + { + if (it.value().is_null()) + { + erase(it.key()); + } + else + { + operator[](it.key()).merge_patch(it.value()); + } + } + } + else + { + *this = apply_patch; + } + } + + /// @} +}; + +/*! +@brief user-defined to_string function for JSON values + +This function implements a user-defined to_string for JSON objects. + +@param[in] j a JSON object +@return a std::string object +*/ + +NLOHMANN_BASIC_JSON_TPL_DECLARATION +std::string to_string(const NLOHMANN_BASIC_JSON_TPL& j) +{ + return j.dump(); +} +} // namespace nlohmann + +/////////////////////// +// nonmember support // +/////////////////////// + +// specialization of std::swap, and std::hash +namespace std +{ + +/// hash value for JSON objects +template<> +struct hash +{ + /*! 
+ @brief return a hash value for a JSON object + + @since version 1.0.0 + */ + std::size_t operator()(const nlohmann::json& j) const + { + return nlohmann::detail::hash(j); + } +}; + +/// specialization for std::less +/// @note: do not remove the space after '<', +/// see https://github.com/nlohmann/json/pull/679 +template<> +struct less<::nlohmann::detail::value_t> +{ + /*! + @brief compare two value_t enum values + @since version 3.0.0 + */ + bool operator()(nlohmann::detail::value_t lhs, + nlohmann::detail::value_t rhs) const noexcept + { + return nlohmann::detail::operator<(lhs, rhs); + } +}; + +// C++20 prohibit function specialization in the std namespace. +#ifndef JSON_HAS_CPP_20 + +/*! +@brief exchanges the values of two JSON objects + +@since version 1.0.0 +*/ +template<> +inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( + is_nothrow_move_constructible::value&& + is_nothrow_move_assignable::value + ) +{ + j1.swap(j2); +} + +#endif + +} // namespace std + +/*! +@brief user-defined string literal for JSON values + +This operator implements a user-defined string literal for JSON objects. It +can be used by adding `"_json"` to a string literal and returns a JSON object +if no parse error occurred. + +@param[in] s a string representation of a JSON object +@param[in] n the length of string @a s +@return a JSON object + +@since version 1.0.0 +*/ +JSON_HEDLEY_NON_NULL(1) +inline nlohmann::json operator "" _json(const char* s, std::size_t n) +{ + return nlohmann::json::parse(s, s + n); +} + +/*! +@brief user-defined string literal for JSON pointer + +This operator implements a user-defined string literal for JSON Pointers. It +can be used by adding `"_json_pointer"` to a string literal and returns a JSON pointer +object if no parse error occurred. 
+ +@param[in] s a string representation of a JSON Pointer +@param[in] n the length of string @a s +@return a JSON pointer object + +@since version 2.0.0 +*/ +JSON_HEDLEY_NON_NULL(1) +inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n) +{ + return nlohmann::json::json_pointer(std::string(s, n)); +} + +// #include + + +// restore GCC/clang diagnostic settings +#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) + #pragma GCC diagnostic pop +#endif +#if defined(__clang__) + #pragma GCC diagnostic pop +#endif + +// clean up +#undef JSON_ASSERT +#undef JSON_INTERNAL_CATCH +#undef JSON_CATCH +#undef JSON_THROW +#undef JSON_TRY +#undef JSON_PRIVATE_UNLESS_TESTED +#undef JSON_HAS_CPP_14 +#undef JSON_HAS_CPP_17 +#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION +#undef NLOHMANN_BASIC_JSON_TPL +#undef JSON_EXPLICIT + +// #include +#undef JSON_HEDLEY_ALWAYS_INLINE +#undef JSON_HEDLEY_ARM_VERSION +#undef JSON_HEDLEY_ARM_VERSION_CHECK +#undef JSON_HEDLEY_ARRAY_PARAM +#undef JSON_HEDLEY_ASSUME +#undef JSON_HEDLEY_BEGIN_C_DECLS +#undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE +#undef JSON_HEDLEY_CLANG_HAS_BUILTIN +#undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE +#undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_CLANG_HAS_EXTENSION +#undef JSON_HEDLEY_CLANG_HAS_FEATURE +#undef JSON_HEDLEY_CLANG_HAS_WARNING +#undef JSON_HEDLEY_COMPCERT_VERSION +#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK +#undef JSON_HEDLEY_CONCAT +#undef JSON_HEDLEY_CONCAT3 +#undef JSON_HEDLEY_CONCAT3_EX +#undef JSON_HEDLEY_CONCAT_EX +#undef JSON_HEDLEY_CONST +#undef JSON_HEDLEY_CONSTEXPR +#undef JSON_HEDLEY_CONST_CAST +#undef JSON_HEDLEY_CPP_CAST +#undef JSON_HEDLEY_CRAY_VERSION +#undef JSON_HEDLEY_CRAY_VERSION_CHECK +#undef JSON_HEDLEY_C_DECL +#undef JSON_HEDLEY_DEPRECATED +#undef JSON_HEDLEY_DEPRECATED_FOR +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ +#undef 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS +#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION +#undef JSON_HEDLEY_DIAGNOSTIC_POP +#undef JSON_HEDLEY_DIAGNOSTIC_PUSH +#undef JSON_HEDLEY_DMC_VERSION +#undef JSON_HEDLEY_DMC_VERSION_CHECK +#undef JSON_HEDLEY_EMPTY_BASES +#undef JSON_HEDLEY_EMSCRIPTEN_VERSION +#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK +#undef JSON_HEDLEY_END_C_DECLS +#undef JSON_HEDLEY_FLAGS +#undef JSON_HEDLEY_FLAGS_CAST +#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE +#undef JSON_HEDLEY_GCC_HAS_BUILTIN +#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE +#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_GCC_HAS_EXTENSION +#undef JSON_HEDLEY_GCC_HAS_FEATURE +#undef JSON_HEDLEY_GCC_HAS_WARNING +#undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK +#undef JSON_HEDLEY_GCC_VERSION +#undef JSON_HEDLEY_GCC_VERSION_CHECK +#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE +#undef JSON_HEDLEY_GNUC_HAS_BUILTIN +#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE +#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_GNUC_HAS_EXTENSION +#undef JSON_HEDLEY_GNUC_HAS_FEATURE +#undef JSON_HEDLEY_GNUC_HAS_WARNING +#undef JSON_HEDLEY_GNUC_VERSION +#undef JSON_HEDLEY_GNUC_VERSION_CHECK +#undef JSON_HEDLEY_HAS_ATTRIBUTE +#undef JSON_HEDLEY_HAS_BUILTIN +#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE +#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS +#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE +#undef JSON_HEDLEY_HAS_EXTENSION +#undef JSON_HEDLEY_HAS_FEATURE +#undef JSON_HEDLEY_HAS_WARNING +#undef JSON_HEDLEY_IAR_VERSION +#undef JSON_HEDLEY_IAR_VERSION_CHECK +#undef JSON_HEDLEY_IBM_VERSION +#undef JSON_HEDLEY_IBM_VERSION_CHECK +#undef JSON_HEDLEY_IMPORT +#undef JSON_HEDLEY_INLINE +#undef JSON_HEDLEY_INTEL_CL_VERSION +#undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK +#undef JSON_HEDLEY_INTEL_VERSION +#undef JSON_HEDLEY_INTEL_VERSION_CHECK +#undef JSON_HEDLEY_IS_CONSTANT +#undef 
JSON_HEDLEY_IS_CONSTEXPR_ +#undef JSON_HEDLEY_LIKELY +#undef JSON_HEDLEY_MALLOC +#undef JSON_HEDLEY_MCST_LCC_VERSION +#undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK +#undef JSON_HEDLEY_MESSAGE +#undef JSON_HEDLEY_MSVC_VERSION +#undef JSON_HEDLEY_MSVC_VERSION_CHECK +#undef JSON_HEDLEY_NEVER_INLINE +#undef JSON_HEDLEY_NON_NULL +#undef JSON_HEDLEY_NO_ESCAPE +#undef JSON_HEDLEY_NO_RETURN +#undef JSON_HEDLEY_NO_THROW +#undef JSON_HEDLEY_NULL +#undef JSON_HEDLEY_PELLES_VERSION +#undef JSON_HEDLEY_PELLES_VERSION_CHECK +#undef JSON_HEDLEY_PGI_VERSION +#undef JSON_HEDLEY_PGI_VERSION_CHECK +#undef JSON_HEDLEY_PREDICT +#undef JSON_HEDLEY_PRINTF_FORMAT +#undef JSON_HEDLEY_PRIVATE +#undef JSON_HEDLEY_PUBLIC +#undef JSON_HEDLEY_PURE +#undef JSON_HEDLEY_REINTERPRET_CAST +#undef JSON_HEDLEY_REQUIRE +#undef JSON_HEDLEY_REQUIRE_CONSTEXPR +#undef JSON_HEDLEY_REQUIRE_MSG +#undef JSON_HEDLEY_RESTRICT +#undef JSON_HEDLEY_RETURNS_NON_NULL +#undef JSON_HEDLEY_SENTINEL +#undef JSON_HEDLEY_STATIC_ASSERT +#undef JSON_HEDLEY_STATIC_CAST +#undef JSON_HEDLEY_STRINGIFY +#undef JSON_HEDLEY_STRINGIFY_EX +#undef JSON_HEDLEY_SUNPRO_VERSION +#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK +#undef JSON_HEDLEY_TINYC_VERSION +#undef JSON_HEDLEY_TINYC_VERSION_CHECK +#undef JSON_HEDLEY_TI_ARMCL_VERSION +#undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK +#undef JSON_HEDLEY_TI_CL2000_VERSION +#undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK +#undef JSON_HEDLEY_TI_CL430_VERSION +#undef JSON_HEDLEY_TI_CL430_VERSION_CHECK +#undef JSON_HEDLEY_TI_CL6X_VERSION +#undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK +#undef JSON_HEDLEY_TI_CL7X_VERSION +#undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK +#undef JSON_HEDLEY_TI_CLPRU_VERSION +#undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK +#undef JSON_HEDLEY_TI_VERSION +#undef JSON_HEDLEY_TI_VERSION_CHECK +#undef JSON_HEDLEY_UNAVAILABLE +#undef JSON_HEDLEY_UNLIKELY +#undef JSON_HEDLEY_UNPREDICTABLE +#undef JSON_HEDLEY_UNREACHABLE +#undef JSON_HEDLEY_UNREACHABLE_RETURN +#undef JSON_HEDLEY_VERSION +#undef 
JSON_HEDLEY_VERSION_DECODE_MAJOR +#undef JSON_HEDLEY_VERSION_DECODE_MINOR +#undef JSON_HEDLEY_VERSION_DECODE_REVISION +#undef JSON_HEDLEY_VERSION_ENCODE +#undef JSON_HEDLEY_WARNING +#undef JSON_HEDLEY_WARN_UNUSED_RESULT +#undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG +#undef JSON_HEDLEY_FALL_THROUGH + + + +#endif // INCLUDE_NLOHMANN_JSON_HPP_ diff --git a/cpp/external/katagocoreml/vendor/mlmodel/LICENSE.txt b/cpp/external/katagocoreml/vendor/mlmodel/LICENSE.txt new file mode 100644 index 000000000..b4570ec56 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/LICENSE.txt @@ -0,0 +1,11 @@ +Copyright (c) 2017, Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/ArrayFeatureExtractor.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/ArrayFeatureExtractor.proto new file mode 100644 index 000000000..d689a3f0e --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/ArrayFeatureExtractor.proto @@ -0,0 +1,19 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * An array feature extractor. + * + * Given an index, extracts the value at that index from its array input. + * Indexes are zero-based. + */ +message ArrayFeatureExtractor { + repeated uint64 extractIndex = 1; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/AudioFeaturePrint.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/AudioFeaturePrint.proto new file mode 100644 index 000000000..8daa3fffa --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/AudioFeaturePrint.proto @@ -0,0 +1,36 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which takes an input audio and outputs array(s) of features + * according to the specified feature types + */ +message AudioFeaturePrint { + + // Specific audio feature print types + + // Sound extracts features useful for identifying the predominant + // sound in audio signal + message Sound { + enum SoundVersion { + SOUND_VERSION_INVALID = 0; + // VERSION_1 is available on iOS,tvOS 15.0+, macOS 12.0+ + // It uses a variable-length input audio sample vector and yields a 512 float feature vector + SOUND_VERSION_1 = 1; + } + + SoundVersion version = 1; + } + + // Audio feature print type + oneof AudioFeaturePrintType { + Sound sound = 20; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/BayesianProbitRegressor.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/BayesianProbitRegressor.proto new file mode 100644 index 000000000..742c99ae8 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/BayesianProbitRegressor.proto @@ -0,0 +1,139 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A Bayesian probit regressor. 
+ * + * The probit regression model is superficially similar to the more commonly known + * logistic regression, with sampling distribution of the model given by + * + * P(y=+1|x,w) = Φ(/β) + * + * where w are the set of weights, + * x are the set of features for the given event, + * β is a model hyper-parameter, and + * Φ is the link function, defined to be the CDF of the normal distribution. + * The weights w[i,j] are Gaussian distributed, with mean μ[i,j] and precision 1/(σ[i,j])^2 + * (where i indexes over features and j indexes over the values for the feature). + * The parameter β scales the steepness of the inverse link function. + * + * (see https://en.wikipedia.org/wiki/Probit_model and https://en.wikipedia.org/wiki/Logistic_regression + * for more details on probit model and logistic regression, respectively) + * + * Input: X + * x represents a set of features, each taking on a discrete value (note that continuous values + * would first need to be discretized). x can be represented as a vector where the index i is + * the feature id and x[i] is the feature value. Alternatively, x can be represented as a matrix + * with 2 columns where the first column indicates the feature id and the second column contains + * the feature values, i.e. x[i,0] is the feature id and x[i,1] is the feature value. + * + * additional input features: + * - "optimism": apply a mean shift to the probability, i.e. 
shift regression mean by o*stdev, + * where o is the "optimism" parameter (see additional output features) + * - "samplingScale": for sampling from posterior, multiply standard deviation by this factor + * - "samplingTruncation": for sampling from posterior, truncate sampling distribution at given multiple of std from mean + * + * Output: Y + * probability P(y|x,w) + * + * additional output features: + * - mean (regression output before applying link function) + * - variance (regression output variance before applying link function) + * - pessimistic probability: P(y|x,w) with a mean shift parameterized by "optimism" feature + * - sampled probability: p ~ P(y|x,w) with standard deviation scaling parametrized by "samplingScale" feature + * and distribution truncated at multiple of standard deviation, + * where multiple parameterized by "samplingTruncation" feature. + * + */ + +message BayesianProbitRegressor { + + /* + * Parameterization of a Gaussian distribution + */ + message Gaussian { + double mean = 1; + double precision = 2; // inverse of the variance + } + + /* + * Weight for a specific feature value + * The weight is represented as a Gaussian distribution + * with a mean and precision (1/variance) to capture + * uncertainty in the weight + */ + message FeatureValueWeight { + uint32 featureValue = 1; + Gaussian featureWeight = 2; + } + + /* + * Feature with associated weights (for different values) + * Each feature has a set of weights for the (discrete) values + * it can take + */ + message FeatureWeight { + uint32 featureId = 1; + repeated FeatureValueWeight weights = 2; + } + + uint32 numberOfFeatures = 1; + + Gaussian bias = 2; // bias term + + /* + * Set of features with associated weights + */ + repeated FeatureWeight features = 3; // feature weights + + /* + * Set this name to be the same as input feature of type multi-array (1D) + * in the model description you want to use as the regression input + */ + string regressionInputFeatureName = 10; + + /* 
+ * Set this name to be the same as optional input feature of type double + * in the model description you want to use as the optimism input + */ + string optimismInputFeatureName = 11; + + /* + * Set this name to be the same as optional input feature of type double + * in the model description you want to use as the samplingScale input + */ + string samplingScaleInputFeatureName = 12; + + /* + * Set this name to be the same as optional input feature of type double + * in the model description you want to use as the samplingBounds input + */ + string samplingTruncationInputFeatureName = 13; + + /* + * name of 'mean' output feature + */ + string meanOutputFeatureName = 20; + + /* + * name of 'variance' output feature + */ + string varianceOutputFeatureName = 21; + + /* + * name of 'pessimistic' output feature + */ + string pessimisticProbabilityOutputFeatureName = 22; + + /* + * name of 'sampled' output feature: samples from the scaled posterior probability distribuiton + */ + string sampledProbabilityOutputFeatureName = 23; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/CategoricalMapping.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/CategoricalMapping.proto new file mode 100644 index 000000000..dcb6eaf9d --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/CategoricalMapping.proto @@ -0,0 +1,38 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * A categorical mapping. + * + * This allows conversion from integers to strings, or from strings to integers. 
+ */ +message CategoricalMapping { + oneof MappingType { + // Conversion from strings to integers + StringToInt64Map stringToInt64Map = 1; + + // Conversion from integer to string + Int64ToStringMap int64ToStringMap = 2; + } + + /* + * The value returned if an input is not contained in the map above. + * If one of these is not set, then an error is raised on an unknown input. + */ + oneof ValueOnUnknown { + // Default output when converting from an integer to a string. + string strValue = 101; + + // Default output when converting from a string to an integer. + int64 int64Value = 102; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/ClassConfidenceThresholding.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/ClassConfidenceThresholding.proto new file mode 100644 index 000000000..173296345 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/ClassConfidenceThresholding.proto @@ -0,0 +1,41 @@ +// Copyright (c) 2022, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* A model to filter classification labels by confidence thresholds. + * + * The model has one input: + * - A multi-array of type FP16, FP32, or FP64 and shape [C], where C + * is the number of classes. + * + * The model has one output: + * - A multi-array of type FP16, FP32, or FP64 and shape [2, C], where + * C is the number of classes. The values in [0, :] is the same as + * the confidence inputs. The values in [1, :] is either 0 or 1, + * where 1 means the class is present and 0 means it is not. + * + * Currently, the model simply takes all the classes. 
+ * + * filteredClassConfidences[0, :] = classConfidences[:] + * filteredClassConfidences[1, :] = 1 + */ + +message ClassConfidenceThresholding { + + /** + * The precision-recall curve for each class label. + * + * The field is optional. When it exists, the number of curves + * must match the number of class labels. + */ + repeated PrecisionRecallCurve precisionRecallCurves = 100; +} + diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/CustomModel.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/CustomModel.proto new file mode 100644 index 000000000..b5a361b10 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/CustomModel.proto @@ -0,0 +1,30 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A parameterized model whose function is defined in code + */ +message CustomModel { + + message CustomModelParamValue { + oneof value { + double doubleValue = 10; + string stringValue = 20; + int32 intValue = 30; + int64 longValue = 40; + bool boolValue = 50; + bytes bytesValue = 60; + } + } + + string className = 10; // The name of the class (conforming to MLCustomModel) corresponding to this model + map parameters = 30; + string description = 40; // An (optional) description provided by the model creator. This information is displayed when viewing the model, but does not affect the model's execution on device. +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/DataStructures.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/DataStructures.proto new file mode 100644 index 000000000..6cd2d1ee6 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/DataStructures.proto @@ -0,0 +1,126 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "FeatureTypes.proto"; + +package CoreML.Specification; + +/* + * A mapping from a string + * to a 64-bit integer. + */ +message StringToInt64Map { + map map = 1; +} + +/* + * A mapping from a 64-bit integer + * to a string. + */ +message Int64ToStringMap { + map map = 1; +} + +/* + * A mapping from a string + * to a double-precision floating point number. + */ +message StringToDoubleMap { + map map = 1; +} + +/* + * A mapping from a 64-bit integer + * to a double-precision floating point number. + */ +message Int64ToDoubleMap { + map map = 1; +} + +/* + * A vector of strings. + */ +message StringVector { + repeated string vector = 1; +} + +/* + * A vector of 64-bit integers. + */ +message Int64Vector { + repeated int64 vector = 1; +} + +/* + * A vector of floating point numbers. + */ +message FloatVector { + repeated float vector = 1; +} + +/* + * A vector of double-precision floating point numbers. + */ +message DoubleVector { + repeated double vector = 1; +} + +/* + * A range of int64 values + */ +message Int64Range { + int64 minValue = 1; + int64 maxValue = 2; +} + +/* + * A set of int64 values + */ +message Int64Set { + repeated int64 values = 1; +} + +/* + * A range of double values + */ +message DoubleRange { + double minValue = 1; + double maxValue = 2; +} + +/** + * Precision/Recall curve. + * + * The syntax comprises two tables, one to look up the confidence value threshold + * for a given precision, and the other for a given recall. 
+ * + * Example: + * ----------------------+----+----+----+----+----+----+----+----+---- + * precisionValues | .1 | .2 | .3 | .4 | .5 | .6 | .7 | + * precisionConfidence | .0 | .0 | .0 | .0 | .1 | .3 | .4 | + * ----------------------+----+----+----+----+----+----+----+----+---- + * + * ----------------------+----+----+----+----+----+----+----+----+---- + * recallValues | .1 | .2 | .3 | .4 | .5 | .6 | .7 | .8 | .9 + * recallConfidence | .7 | .6 | .5 | .4 | .3 | .3 | .2 | .1 | .0 + * ----------------------+----+----+----+----+----+----+----+----+---- + * + * The application expects that, when it filters out samples with + * confidence threshold = 0.1, it gets precision = 0.5. Likewise, + * with threshold = 0.2 it gets recall = 0.7. + * + * The table must have only valid values; do not use `NaN`, `+/- INF`, + * or negative values. The application is responsible for inter/extrapolating + * approprate confidence threshold based on the application's specific need. + */ +message PrecisionRecallCurve { + FloatVector precisionValues = 1; + FloatVector precisionConfidenceThresholds = 2; + FloatVector recallValues = 3; + FloatVector recallConfidenceThresholds = 4; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/DictVectorizer.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/DictVectorizer.proto new file mode 100644 index 000000000..73f6a0c42 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/DictVectorizer.proto @@ -0,0 +1,36 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * Uses an index mapping to convert a dictionary to an array. + * + * The output array will be equal in length to the index mapping vector parameter. 
+ * All keys in the input dictionary must be present in the index mapping vector. + * + * For each item in the input dictionary, insert its value in the output array. + * The position of the insertion is determined by the position of the item's key + * in the index mapping. Any keys not present in the input dictionary, will be + * zero in the output array. + * + * For example: if the ``stringToIndex`` parameter is set to ``["a", "c", "b", "z"]``, + * then an input of ``{"a": 4, "c": 8}`` will produce an output of ``[4, 8, 0, 0]``. + * + */ +message DictVectorizer { + oneof Map { + // String keys to indexes + StringVector stringToIndex = 1; + + // Int keys to indexes + Int64Vector int64ToIndex = 2; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureTypes.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureTypes.proto new file mode 100644 index 000000000..46c51fb67 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureTypes.proto @@ -0,0 +1,233 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * The 64-bit integer feature type. + */ +message Int64FeatureType {} + +/* + * The double-precision floating point number feature type. + */ +message DoubleFeatureType {} + +/* + * The string feature type. + */ +message StringFeatureType {} + + +message SizeRange { + uint64 lowerBound = 1; + int64 upperBound = 2; // negative value means unbound otherwise upperbound is included in range +} + +/* + * The image feature type. 
+ */ +message ImageFeatureType { + // Assumes raw (decompressed) format + enum ColorSpace { + INVALID_COLOR_SPACE = 0; + GRAYSCALE = 10; // 8 bits per pixel + RGB = 20; // 32 bits per pixel: RGBA with A channel ignored + BGR = 30; // 32 bits per pixel: BGRA with A channel ignored + GRAYSCALE_FLOAT16 = 40; // 16 bits float per pixel + } + + message ImageSize { + uint64 width = 1; + uint64 height = 2; + } + + message EnumeratedImageSizes { + repeated ImageSize sizes = 1; + } + + message ImageSizeRange { + SizeRange widthRange = 1; + SizeRange heightRange = 2; + } + + // The required or default image size is width x height + // + // If specificationVersion <= 2 or SizeFlexibility is empty, + // width x height is the required fixed image size + // + // If SizeFlexibility is present, width x height indicate a "default" + // image size which must be consistent with the flexibility specified + + int64 width = 1; + int64 height = 2; + + // For specification version >= 3 you can specify image size flexibility. + + oneof SizeFlexibility { + + // Use enumeratedSizes for a set of distinct fixed sizes + // e.g. portrait or landscape: [80 x 100, 100 x 8] + // + // If the width x height fields above are specified then they must be + // one of the sizes listed. + // + // If width and height are not specified above then the default width + // and height will be enumeratedSizes[0] + // + // Must be non-empty + + EnumeratedImageSizes enumeratedSizes = 21; + + // Use imageSizeRange to allow for ranges of values + // e.g. any image greater than 10 x 20: [10..= 3 you can specify image size flexibility. + + oneof ShapeFlexibility { + + // Use enumeratedShapes for a set of distinct fixed shapes + // + // If the shape field is specified then it must be + // one of the enumerated shapes. 
+ // + // If shape is not specified, the "default" shape will be considered + // enumeratedShapes[0] + // + // Must be non-empty + + EnumeratedShapes enumeratedShapes = 21; + + // Use shapeRange to allow the size of each dimension vary within + // independently specified ranges + // + // If you specify shape above it must fall in the range + // specified in shapeRanges. It will be treated as the default shape. + // + // If you don't specify shape above then the default shape will + // have shape[d] = shapeRange.sizeRanges[d].lowerBound + + ShapeRange shapeRange = 31; + + } + + oneof defaultOptionalValue { + int32 intDefaultValue = 41; + float floatDefaultValue = 51; + double doubleDefaultValue = 61; + } + +} + +/* + * The dictionary feature type. + */ +message DictionaryFeatureType { + /* + * Key/value type tags, with the following restrictions: + * - ``keyType`` must be a hashable type + * - ``valueType`` is assumed to be a ``double`` + */ + oneof KeyType { + Int64FeatureType int64KeyType = 1; + StringFeatureType stringKeyType = 2; + } +} + +/* + * The Sequence feature type. + */ +message SequenceFeatureType { + + /* + * Currently only categorical int64 and String sequences are supported + */ + oneof Type { + Int64FeatureType int64Type = 1; + StringFeatureType stringType = 3; + } + + // Range of allowed size/length/count of sequence + SizeRange sizeRange = 101; +} + +message StateFeatureType { + oneof Type { + ArrayFeatureType arrayType = 1; + } +} + +/* + * A feature, which may be optional. 
+ */ +message FeatureType { + oneof Type { + Int64FeatureType int64Type = 1; + DoubleFeatureType doubleType = 2; + StringFeatureType stringType = 3; + ImageFeatureType imageType = 4; + ArrayFeatureType multiArrayType = 5; + DictionaryFeatureType dictionaryType = 6; + SequenceFeatureType sequenceType = 7; + StateFeatureType stateType = 8; + } + + bool isOptional = 1000; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureVectorizer.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureVectorizer.proto new file mode 100644 index 000000000..94d97474a --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/FeatureVectorizer.proto @@ -0,0 +1,26 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A FeatureVectorizer puts one or more features into a single array. + * + * The ordering of features in the output array is determined by + * ``inputList``. + * + * ``inputDimensions`` is a zero based index. + */ +message FeatureVectorizer { + message InputColumn { + string inputColumn = 1; + uint64 inputDimensions = 2; + } + + repeated InputColumn inputList = 1; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/GLMClassifier.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/GLMClassifier.proto new file mode 100644 index 000000000..66f5befc3 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/GLMClassifier.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * A generalized linear model classifier. + */ +message GLMClassifier { + message DoubleArray { + repeated double value = 1; + } + + enum PostEvaluationTransform { + Logit = 0; + Probit = 1; // Only binary classification is supported for probit + } + + enum ClassEncoding { + ReferenceClass = 0; // First class is the reference class + OneVsRest = 1; // Also called One vs All + } + + repeated DoubleArray weights = 1; + repeated double offset = 2; + PostEvaluationTransform postEvaluationTransform = 3; + ClassEncoding classEncoding = 4; + + /* + * Required class label mapping. + */ + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/GLMRegressor.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/GLMRegressor.proto new file mode 100644 index 000000000..fb46492d0 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/GLMRegressor.proto @@ -0,0 +1,28 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A generalized linear model regressor. 
+ */ +message GLMRegressor { + message DoubleArray { + repeated double value = 1; + } + + enum PostEvaluationTransform { + NoTransform = 0; + Logit = 1; + Probit = 2; + } + + repeated DoubleArray weights = 1; + repeated double offset = 2; + PostEvaluationTransform postEvaluationTransform = 3; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Gazetteer.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Gazetteer.proto new file mode 100644 index 000000000..8dac370e7 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Gazetteer.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2019, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification.CoreMLModels; + +/* +* A model which uses an efficient probabilistic representation +* for assigning labels to a set of strings. +*/ +message Gazetteer { + + /* + * Stores the revision number for the model, revision 2 is available on + * iOS, tvOS 13.0+, macOS 10.15+ + */ + uint32 revision = 1; + + /* + * Stores the language of the model, as specified in BCP-47 format, + * e.g. "en-US". See https://tools.ietf.org/html/bcp47 + */ + string language = 10; + + /* + * Natural Language framework's efficient representation of a gazetter. + */ + bytes modelParameterData = 100; + + /* + * Stores the set of output class labels + */ + oneof ClassLabels { + StringVector stringClassLabels = 200; + } + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Identity.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Identity.proto new file mode 100644 index 000000000..b932fe3d7 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Identity.proto @@ -0,0 +1,18 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * An identity model. + * + * This model returns given inputs as outputs, unchanged. + * Intended to be used for testing purposes. + */ +message Identity { +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Imputer.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Imputer.proto new file mode 100644 index 000000000..ecedb0119 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Imputer.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * A transformer that replaces missing values with a default value, + * such as a statistically-derived value. + * + * If ``ReplaceValue`` is set, then missing values of that type are + * replaced with the corresponding value. + * + * For example: if ``replaceDoubleValue`` is set to ``NaN`` + * and a single ``NaN`` double value is provided as input, + * then it is replaced by ``imputedDoubleValue``. However + * if the input is an array of doubles, then any instances + * of ``NaN`` in the array is replaced with the corresponding + * value in ``imputedDoubleArray``. 
+ */ +message Imputer { + oneof ImputedValue { + double imputedDoubleValue = 1; + int64 imputedInt64Value = 2; + string imputedStringValue = 3; + DoubleVector imputedDoubleArray = 4; + Int64Vector imputedInt64Array = 5; + StringToDoubleMap imputedStringDictionary = 6; + Int64ToDoubleMap imputedInt64Dictionary = 7; + } + + oneof ReplaceValue { + double replaceDoubleValue = 11; + int64 replaceInt64Value = 12; + string replaceStringValue = 13; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/ItemSimilarityRecommender.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/ItemSimilarityRecommender.proto new file mode 100644 index 000000000..eb0292ac6 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/ItemSimilarityRecommender.proto @@ -0,0 +1,74 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + + +/* + * Item Similarity Recommender + * + * The Item Similarity recommender takes as input a list of items and scores, + * then uses that information and a table of item similarities to predict similarity + * scores for all items. By default, the items predicted are most similar to the given + * items but not part of that item set. + * + * The predicted score for a given item k is + * sum_(i in observed items) sim_(k,i) * (score_i - shift_k) + * + * Because only the most similar scores for each item i are stored, + * sim_(k,i) is often zero. + * + * For many models, the score adjustment parameter shift_j is zero -- it's occasionally used + * to counteract global biases for popular items. + * + * + * References: + */ +message ItemSimilarityRecommender { + + /* The items similar to a given base item. 
+ */ + message ConnectedItem { + uint64 itemId = 1; + double similarityScore = 2; + } + + /* The formula for the score of a given model as given above, with shift_k + * parameter given by itemScoreAdjustment, and the similar item list filling in + * all the known sim(k,i) scores for i given by itemID and k given by the itemID parameter in + * the similarItemList. + */ + message SimilarItems { + uint64 itemId = 1; + repeated ConnectedItem similarItemList = 2; + double itemScoreAdjustment = 3; + } + + repeated SimilarItems itemItemSimilarities = 1; + + /* One or none of these are given. If none are given, then the items must number 0, 1, ..., num_items - 1. + * If either is given, the length must be exactly num_items. + */ + StringVector itemStringIds = 2; + Int64Vector itemInt64Ids = 3; + + /* Input parameter names specifying different possible inputs to the recommender. + */ + string itemInputFeatureName = 10; /* Required */ + string numRecommendationsInputFeatureName = 11; /* Optional; defaults to all items if not given.*/ + string itemRestrictionInputFeatureName = 12; /* Optional. */ + string itemExclusionInputFeatureName = 13; /* Optional; defaults to input item list if not given. */ + + /* The predicted outputs. At least one of these must be specified. + */ + string recommendedItemListOutputFeatureName = 20; + string recommendedItemScoreOutputFeatureName = 21; + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/LICENSE.txt b/cpp/external/katagocoreml/vendor/mlmodel/format/LICENSE.txt new file mode 100644 index 000000000..bbcdc9ef8 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/LICENSE.txt @@ -0,0 +1,11 @@ +Copyright (c) 2017, Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/LinkedModel.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/LinkedModel.proto new file mode 100644 index 000000000..7b5263c3a --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/LinkedModel.proto @@ -0,0 +1,40 @@ +// Copyright (c) 2019, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; +import public "Parameters.proto"; + +package CoreML.Specification; + +/* + * A model which wraps another (compiled) model external to this one + */ +message LinkedModel { + + oneof LinkType { + // A model located via a file system path + LinkedModelFile linkedModelFile = 1; + } +} + +// Model is referenced by a model file name and search path +message LinkedModelFile { + + // Model file name: e.g. "MyFetureExtractor.mlmodelc" + StringParameter linkedModelFileName = 1; + + // Search path to find the linked model file + // Multiple paths can be searched using the unix-style path separator ":" + // Each path can be relative (to this model) or absolute + // + // An empty string is the same as the relative search path "." + // which searches in the same location as this model file + // + // There are some special paths which start with $ + // - $BUNDLE_MAIN - Indicates to look in the main bundle + // - $BUNDLE_IDENTIFIER(identifier) - Looks in Bunde with given identifier + StringParameter linkedModelSearchPath = 2; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/MIL.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/MIL.proto new file mode 100644 index 000000000..af7c3e004 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/MIL.proto @@ -0,0 +1,371 @@ +// Copyright (c) 2019, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +/* + * - A Program is the container with following information + * - set of functions: Function defines a program block to be executed + * - A model can have multiple functions defined and will have a single point of entry. 
+ * - A Function consists of + * - List of named inputs and output types + * - A block defining scope for a function - similar to a function in C/C++ + * - A Block consists of + * - List of named inputs and output names + * - Topologically sorted Ops + * - A Op consists of + * - List of named inputs and outputs (name, type) pair + * - Optionally, blocks for Control-Flow + * + * Programs, functions, blocks, ops, and tensor types all can contain an optional set of attributes. + * + * == Identifiers == + * Identifiers, generally used for names and keys, must match the + * regular expression [A-Za-z\_][A-Za-z0-9\_@]* + */ + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification.MILSpec; + +// The top level container. +message Program { + int64 version = 1; + + // Must be unique within the containing program + // Names must be valid identifiers as described above. + map functions = 2; + + string docString = 3; + + // Any other attributes not described by other fields. + // Keys must be valid identifiers as described above. + map attributes = 4; +} + +// A program-level function. +message Function { + + // Function inputs are unordered (name, ValueType) pairs. + // Inputs intended to process images must be rank-4 Float32 tensors. Dimensions + // are interpreted as NCHW, with N == 1 and C being 1 for grayscale and 3 for RGB. + // Names must be valid identifiers as described above. + repeated NamedValueType inputs = 1; + + // The active block is drawn from this named specialization. + // This key must exist in `block_specializations`. + string opset = 2; + + // Named specializations of this function. + // + // Specialization keys are the name of the opset that the + // function specialization is written in. They must be valid + // identifiers as described above. + // + // Outputs from all blocks must match. They define the outputs + // of the function. + // Each block inherits the lexical scope from the function. 
+ map block_specializations = 3; + + // Any other attributes not described by other fields. + // Keys must be valid identifiers as described above. + map attributes = 4; +} + +// A basic block with a single entry and exit in SSA form. +message Block { + // Infrequently used, these are for operators that may need to give + // block-local names to input values (e.g. while_loop). + repeated NamedValueType inputs = 1; + + // The names to give to values returned by this block. They must be + // identifiers as described above. + // + // ValueType of outputs[i] is Operation[j].outputs[k].type where + // i, j and k are indices of block output, block Operation and + // jth operation's output respectively. + // this is due to + // 1. An operation can have more than one output + // 2. Any one of operation's output could be potentially block's output + repeated string outputs = 2; + + repeated Operation operations = 3; + + // Any other attributes not described by other fields. + // Keys must be valid identifiers as described above. + map attributes = 4; +} + +// Argument is list of Binding to either name or value +message Argument { + message Binding { + oneof binding { + // The name of a previously defined value. + string name = 1; + + // A compile time constant. + Value value = 2; + } + } + + repeated Binding arguments = 1; +}; + +// A single operation/node/layer. +message Operation { + // Examples: "convolution", "cropResize". Operation type defines the + // expected inputs and output. + string type = 1; + + // Operator arguments + // + // Key: parameter name + // Value: Argument (list of bindings) + // + // Value is list of argument binding to given parameter + // Binding can be a string name (previous operation output or input given to model/block/function) + // or a Value (known compile time value for given operation) + // Argument can be of length 1 (general) or variable length (e.g. concat layer) + // e.g. {'stride' : ['input_01']} + // e.g. 
{'x' : ['input_01', 'input_02', 'input_03', false]} + map inputs = 2; + + // Names to which to bind values returned by this operation. + // Names must be: + // (*) valid identifiers as described above; and + // (*) unique within the current scope. + repeated NamedValueType outputs = 3; + + // Nested blocks for loops and conditionals. For example, + // a conditional block will have two entries here. + repeated Block blocks = 4; + + // Any other information not captured by other fields. + // Keys must be valid identifiers as described above. + map attributes = 5; +} + +// Named Value parameters +// (name, type) pair +message NamedValueType { + // The name of this parameter; must be a valid identifier as described above. + string name = 1; + + // This parameter's required type. + ValueType type = 2; +} + +/* ======== Types ======= */ + +// Primer: Two fundamental representations of state: +// +// Variable: Variables are NEVER materialized at compile time and are only +// available at run time. Therefore, for Variables we only have ValueType, +// which may have unknown shapes in the IR. Variable encompasses familiar +// concepts such as placeholder, output of an Op. +// +// Value: Values are ALWAYS materialized at compile time, and MAY be modified +// at runtime (e.g., during on-device training). Value describes notions +// such as parameter, attributes of an op. Value is either stored inside +// proto (e.g., attributes) or outside of proto (e.g. parameters) and +// NEVER contains unknown shape in the IR. +// +// Comment(daviddai): A Variable with the potential to be materialized at +// compile time (e.g., through constant propagation) does NOT preclude it to +// be a Variable. Certain Ops such as LoadParameter and Const, their output +// has potential to be materialized at compile time but is still represented +// as Variable. 
+ +// A type of any kind +message ValueType { + oneof type { + TensorType tensorType = 1; + ListType listType = 2; + TupleType tupleType = 3; + DictionaryType dictionaryType = 4; + StateType stateType = 5; + } +} + +// Supported data types +enum DataType { + // Comment: Two schemes of specifying field id: just start with 0 + // without reserving numbers, but keep track of the next field ID. The + // other is assign blocks of ID to int / float / uint etc. + + // 0-10 reserved for special types + UNUSED_TYPE = 0; // not currently in use + BOOL = 1; + STRING = 2; // arbitrary sequence of bytes + + // Floats + FLOAT8E4M3FN = 40; + FLOAT8E5M2 = 41; + FLOAT16 = 10; + FLOAT32 = 11; + FLOAT64 = 12; + BFLOAT16 = 13; + + // Ints + INT8 = 21; + INT16 = 22; + INT32 = 23; + INT64 = 24; + INT4 = 25; + + // UInts + UINT8 = 31; + UINT16 = 32; + UINT32 = 33; + UINT64 = 34; + + UINT4 = 35; + UINT2 = 36; + UINT1 = 37; + UINT6 = 38; + UINT3 = 39; +} + +message TensorType { + // The data type stored in a tensor of this type + DataType dataType = 1; + + // The number of dimensions in the tensor shape. rank == -1 implies + // variable (not fixed) rank + int64 rank = 2; + + // Tensor shape values; must be of length "rank" + repeated Dimension dimensions = 3; + + // Any other tensor type attributes not described by other fields. + // Keys must be valid identifiers in MIL text syntax. + map attributes = 4; +} + +message TupleType { + // Recursively define TupleType from ValueType. + repeated ValueType types = 1; +} + +message ListType { + // The type of element stored in a list of this type + ValueType type = 1; + + // The number of elements in a list of this type. 
May be unknown (variable length) + Dimension length = 2; +} + +// An unordered key-value mapping +message DictionaryType { + ValueType keyType = 1; + ValueType valueType = 2; +} + +message StateType { + ValueType wrappedType = 1; +} + +message Dimension { + oneof dimension { + ConstantDimension constant = 1; + UnknownDimension unknown = 2; + } + + message ConstantDimension { + uint64 size = 1; + } + + message UnknownDimension { + bool variadic = 1; + } +} + +/* ======== Values ======= */ + +// See Variable vs Value primer above. +message Value { + string docString = 1; // optional human-readable texts. + ValueType type = 2; + + // An immediate value stored within the proto + message ImmediateValue { + oneof value { + TensorValue tensor = 1; + TupleValue tuple = 2; + ListValue list = 3; + DictionaryValue dictionary = 4; + } + } + + // Reference to a "blob v2" storage file + message BlobFileValue { + // name of file + string fileName = 1; + + // byte offset to metadata + uint64 offset = 2; + } + + oneof value { + ImmediateValue immediateValue = 3; + BlobFileValue blobFileValue = 5; + } +} + +message TensorValue { + oneof value { + RepeatedFloats floats = 1; + RepeatedInts ints = 2; + RepeatedBools bools = 3; + RepeatedStrings strings = 4; + RepeatedLongInts longInts = 5; + RepeatedDoubles doubles = 6; + RepeatedBytes bytes = 7; + } + + message RepeatedFloats { + repeated float values = 1 [packed = true]; + } + + message RepeatedDoubles { + repeated double values = 1 [packed = true]; + } + + message RepeatedInts { + repeated int32 values = 1 [packed = true]; + } + + message RepeatedLongInts { + repeated int64 values = 1 [packed = true]; + } + + message RepeatedBools { + repeated bool values = 1 [packed = true]; + } + + message RepeatedStrings { + repeated string values = 1; + } + + message RepeatedBytes { + bytes values = 1; + } +} + +message TupleValue { + // Comment: TupleValue is recursively defined from Value. 
+ repeated Value values = 1; +} + +message ListValue { + repeated Value values = 1; +} + +message DictionaryValue { + message KeyValuePair { + Value key = 1; + Value value = 2; + } + repeated KeyValuePair values = 1; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Model.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Model.proto new file mode 100644 index 000000000..d44f0b3e9 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Model.proto @@ -0,0 +1,415 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +/* + * A Core ML model consists of a specification version + * and a model description, + * and can be any one of the following types: + * + * Neural Networks + * - `NeuralNetwork` + * + * Regressors + * - ``GLMRegressor`` + * - ``SupportVectorRegressor`` + * - ``TreeEnsembleRegressor`` + * - ``NeuralNetworkRegressor`` + * - ``BayesianProbitRegressor`` + * + * Classifiers + * - `NeuralNetworkClassifier` + * - `TreeEnsembleClassifier` + * - `GLMClassifier` + * - `SupportVectorClassifier` + * - `KNearestNeighborsClassifier` + * + * Other models + * - `CustomModel` + * - `TextClassifier` + * - `WordTagger` + * - `Gazetteer` + * - `WordEmbedding` + * - `VisionFeaturePrint` + * - `LinkedModel` + * - `SoundAnalysisPreprocessing` + * - `ItemSimilarityRecommender` + * - `ClassConfidenceThresholding` + * + * Feature Engineering + * - `Imputer` + * - `Scaler` + * - `Normalizer` + * - `OneHotEncoder` + * - `CategoricalMapping` + * - `FeatureVectorizer` + * - `DictVectorizer` + * - `ArrayFeatureExtractor` + * - `NonMaximumSuppression` + * + * Pipelines + * - `PipelineClassifier` + * - `PipelineRegressor` + * - `Pipeline` + * + * Simple Mathematical Functions + * - `Identity` + */ + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public 
"VisionFeaturePrint.proto"; +import public "AudioFeaturePrint.proto"; +import public "TextClassifier.proto"; +import public "WordTagger.proto"; +import public "Gazetteer.proto"; +import public "WordEmbedding.proto"; +import public "ArrayFeatureExtractor.proto"; +import public "BayesianProbitRegressor.proto"; +import public "CategoricalMapping.proto"; +import public "CustomModel.proto"; +import public "DictVectorizer.proto"; +import public "FeatureTypes.proto"; +import public "FeatureVectorizer.proto"; +import public "GLMRegressor.proto"; +import public "GLMClassifier.proto"; +import public "NearestNeighbors.proto"; +import public "Identity.proto"; +import public "Imputer.proto"; +import public "MIL.proto"; +import public "NeuralNetwork.proto"; +import public "Normalizer.proto"; +import public "OneHotEncoder.proto"; +import public "Scaler.proto"; +import public "NonMaximumSuppression.proto"; +import public "SVM.proto"; +import public "TreeEnsemble.proto"; +import public "Parameters.proto"; +import public "ItemSimilarityRecommender.proto"; +import public "SoundAnalysisPreprocessing.proto"; +import public "LinkedModel.proto"; +import public "ClassConfidenceThresholding.proto"; + +package CoreML.Specification; + +/* + * A pipeline consists of one or more models. + */ +message Pipeline { + repeated Model models = 1; + + // Optional names given for each model + // If not supplied it defaults to ["model0",..., "model"(models.size()-1)] + // These names can be used to disambiguate the scope / domain of a parameter + repeated string names = 2; +} + +/* + * A classifier pipeline. + */ +message PipelineClassifier { + Pipeline pipeline = 1; +} + +/* + * A regressor pipeline. + */ +message PipelineRegressor { + Pipeline pipeline = 1; +} + +/* + * A feature description + * consisting of a name, short description, and type. 
+ */ +message FeatureDescription { + string name = 1; + string shortDescription = 2; + FeatureType type = 3; +} + +/* + * Model metadata, + * consisting of a short description, a version string, + * an author, a license, and any other user defined + * key/value meta data. + */ +message Metadata { + string shortDescription = 1; + string versionString = 2; + string author = 3; + string license = 4; + map userDefined = 100; +} + +/* + * A description of a function. + */ +message FunctionDescription { + // The function name. + string name = 1; + + // Input feature descriptions for the function. + repeated FeatureDescription input = 2; + + // Output feature descriptions for the function. + repeated FeatureDescription output = 3; + + // State feature descriptions for the function. + // + // The `type` of each feature description must be `StateFeatureType`. + repeated FeatureDescription state = 6; + + // [Required for regressor and classifier functions]: the name + // to give to an output feature containing the prediction. + string predictedFeatureName = 4; + + // [Optional for classifier functions]: the name to give to an + // output feature containing a dictionary mapping class + // labels to their predicted probabilities. If not specified, + // the dictionary will not be returned by the model. + string predictedProbabilitiesName = 5; +} + +/* + * A description of a model, + * consisting of descriptions of its input and output features. + * Both regressor and classifier models require the name of the + * primary predicted output feature (``predictedFeatureName``). + * Classifier models can specify the output feature containing + * probabilities for the predicted classes + * (``predictedProbabilitiesName``). + */ +message ModelDescription { + // Functions in the model. + // + // Some model types (e.g. ML Program) support multiple functions. For + // example, a large language model might have "prompt" and "extend" + // functions. 
Each has a different input and output behavior, but + // they are in a same model and share resources. + // + // If the model has more than one function, use the multiple + // function configuration and declare the feature descriptions and + // associated properties at function level. + // + // If the model type doesn't support multiple functions or the + // model has just "main" function, declare the feature + // descriptions and associated properties at the model level. + // + // Note: feature descriptions and associated properties mentioned + // above include input, output, state, predictedFeatureName, + // predictedProbabilitiesName, and trainingInput fields. + repeated FunctionDescription functions = 20; + + // The default function. + // + // The default function is the one that is automatically used when + // one doesn't explicitly specify. + // + // The value must be one of the names in `functions` message + // above. If `functions` is empty, this field must not be present. + string defaultFunctionName = 21; + + // The metadata (e.g. author, licence, etc) of the model. + Metadata metadata = 100; + + // Use these fields below only when `functions` above is empty. + + repeated FeatureDescription input = 1; + repeated FeatureDescription output = 10; + + // State feature descriptions for the function. + // + // The `type` of each feature description must be `StateFeatureType`. + repeated FeatureDescription state = 13; + + // [Required for regressor and classifier models]: the name + // to give to an output feature containing the prediction. + string predictedFeatureName = 11; + + // [Optional for classifier models]: the name to give to an + // output feature containing a dictionary mapping class + // labels to their predicted probabilities. If not specified, + // the dictionary will not be returned by the model. 
+ string predictedProbabilitiesName = 12; + + repeated FeatureDescription trainingInput = 50; +} + +message SerializedModel { + // Identifier whose content describes the model type of the serialized protocol buffer message. + string identifier = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes model = 2; +} + +/* + * A Core ML model, + * consisting of a specification version, + * a model description, and a model type. + * + * Core ML model compatibility is indicated by + * a monotonically increasing specification version number, + * which is incremented anytime a backward-incompatible change is made + * (this is functionally equivalent to the MAJOR version number + * described by `Semantic Versioning 2.0.0 `_). + * + * Specification Versions : OS Availability (Core ML Version) + * + * 1 : iOS 11, macOS 10.13, tvOS 11, watchOS 4 (Core ML 1) + * - Feedforward & Recurrent Neural Networks + * - General Linear Models + * - Tree Ensembles + * - Support Vector Machines + * - Pipelines + * - Feature Engineering + * + * 2 : iOS 11.2, macOS 10.13.2, tvOS 11.2, watchOS 4.2 (Core ML 1.2) + * - Custom Layers for Neural Networks + * - Float 16 support for Neural Network layers + * + * 3 : iOS 12, macOS 10.14, tvOS 12, watchOS 5 (Core ML 2) + * - Flexible shapes and image sizes + * - Categorical sequences + * - Core ML Vision Feature Print, Text Classifier, Word Tagger + * - Non Max Suppression + * - Crop and Resize Bilinear NN layers + * - Custom Models + * + * 4 : iOS 13, macOS 10.15, tvOS 13, watchOS 6 (Core ML 3) + * - Updatable models + * - Exact shape / general rank mapping for neural networks + * - Large expansion of supported neural network layers + * - Generalized operations + * - Control flow + * - Dynamic layers + * - See NeuralNetwork.proto + * - Nearest Neighbor Classifier + * - Sound Analysis Prepreocessing + * - Recommender + * - Linked Model + * - NLP Gazeteer + * - NLP WordEmbedding + * + * 5 : iOS 14, macOS 11, tvOS 14, 
watchOS 7 (Core ML 4) + * - Model Deployment + * - Model Encryption + * - Unified converter API with PyTorch and Tensorflow 2 Support in coremltools 4 + * - MIL builder for neural networks and composite ops in coremltools 4 + * - New layers in neural network: + * - CumSum + * - OneHot + * - ClampedReLu + * - ArgSort + * - SliceBySize + * - Convolution3D + * - Pool3D + * - Bilinear Upsample with align corners and fractional factors + * - PixelShuffle + * - MatMul with int8 weights and int8 activations + * - Concat interleave + * - See NeuralNetwork.proto + * - Enhanced Xcode model view with interactive previews + * - Enhanced Xcode Playground support for Core ML models + * + * 6 : iOS 15, macOS 12, tvOS 15, watchOS 8 (Core ML 5) + * - Core ML Audio Feature Print + * - new type of model: mlprogram (MILSpec.Program) + * + * 7 : iOS 16, macOS 13, tvOS 16, watchOS 9 (Core ML 6) + * - FLOAT16 array data type + * - GRAYSCALE_FLOAT16 image color space. + * + * 8 : iOS 17, macOS 14, tvOS 17, watchOS 10 (Core ML 7) + * - iOS 17 ops + * - Scene print v2 + * - ClassConfidenceThresholding model + * + * 9 : iOS 18, macOS 15, tvOS 18, watchOS 11 (Core ML 8) + * - multiple functions + * + * 10 : iOS 26, macOS 26, tvOS 26, watchOS 26, visionOS 26 (Core ML 9) + * - Int8 MultiArray types for ML Programs + */ +message Model { + int32 specificationVersion = 1; + ModelDescription description = 2; + + /* + * Following model types support on-device update: + * + * - NeuralNetworkClassifier + * - NeuralNetworkRegressor + * - NeuralNetwork + * - KNearestNeighborsClassifier + */ + bool isUpdatable = 10; + + // start at 200 here + // model specific parameters: + oneof Type { + // pipeline starts at 200 + PipelineClassifier pipelineClassifier = 200; + PipelineRegressor pipelineRegressor = 201; + Pipeline pipeline = 202; + + // regressors start at 300 + GLMRegressor glmRegressor = 300; + SupportVectorRegressor supportVectorRegressor = 301; + TreeEnsembleRegressor treeEnsembleRegressor = 302; + 
NeuralNetworkRegressor neuralNetworkRegressor = 303; + BayesianProbitRegressor bayesianProbitRegressor = 304; + + // classifiers start at 400 + GLMClassifier glmClassifier = 400; + SupportVectorClassifier supportVectorClassifier = 401; + TreeEnsembleClassifier treeEnsembleClassifier = 402; + NeuralNetworkClassifier neuralNetworkClassifier = 403; + KNearestNeighborsClassifier kNearestNeighborsClassifier = 404; + + // generic models start at 500 + NeuralNetwork neuralNetwork = 500; + ItemSimilarityRecommender itemSimilarityRecommender = 501; + MILSpec.Program mlProgram = 502; + + // Custom and linked models + CustomModel customModel = 555; + LinkedModel linkedModel = 556; + + // Precision Recall Curve 'container'' + ClassConfidenceThresholding classConfidenceThresholding = 560; + + // feature engineering starts at 600 + OneHotEncoder oneHotEncoder = 600; + Imputer imputer = 601; + FeatureVectorizer featureVectorizer = 602; + DictVectorizer dictVectorizer = 603; + Scaler scaler = 604; + CategoricalMapping categoricalMapping = 606; + Normalizer normalizer = 607; + ArrayFeatureExtractor arrayFeatureExtractor = 609; + NonMaximumSuppression nonMaximumSuppression = 610; + + + // simple mathematical functions used for testing start at 900 + Identity identity = 900; + + // reserved until 1000 + + // CoreML provided models + CoreMLModels.TextClassifier textClassifier = 2000; + CoreMLModels.WordTagger wordTagger = 2001; + CoreMLModels.VisionFeaturePrint visionFeaturePrint = 2002; + CoreMLModels.SoundAnalysisPreprocessing soundAnalysisPreprocessing = 2003; + CoreMLModels.Gazetteer gazetteer = 2004; + CoreMLModels.WordEmbedding wordEmbedding = 2005; + CoreMLModels.AudioFeaturePrint audioFeaturePrint = 2006; + + // Reserved private messages start at 3000 + // These messages are subject to change with no notice or support. 
+ SerializedModel serializedModel = 3000; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/NearestNeighbors.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/NearestNeighbors.proto new file mode 100644 index 000000000..d7f2a60f9 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/NearestNeighbors.proto @@ -0,0 +1,132 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +import public "DataStructures.proto"; +import public "Parameters.proto"; + +/* + * A k-Nearest-Neighbor classifier + */ +message KNearestNeighborsClassifier { + + /* + * The "core" nearest neighbor model attributes. + */ + NearestNeighborsIndex nearestNeighborsIndex = 1; + + /* + * Number of neighbors to use for classification. + */ + Int64Parameter numberOfNeighbors = 3; + + /* + * Type of labels supported by the model. Currently supports String or Int64 + * labels. + */ + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } + + /* + * Default value of class label (useful when prediction is called on an empty kNN classifier) + */ + oneof DefaultClassLabel { + string defaultStringLabel = 110; + int64 defaultInt64Label = 111; + } + + /* + * Weighting scheme to be used when computing the majority label of a + * new data point. + */ + oneof WeightingScheme { + UniformWeighting uniformWeighting = 200; + InverseDistanceWeighting inverseDistanceWeighting = 210; + } +} + +/* + * The "core" attributes of a Nearest Neighbors model. + */ +message NearestNeighborsIndex { + + /* + * Number of dimensions of the input data. + */ + int32 numberOfDimensions = 1; + + /* + * Vector of floating point data that makes up the model. 
Each data point must have 'numberOfDimensions' + * dimensions. + */ + repeated FloatVector floatSamples = 2; + + /* + * Backing data structure for the Nearest Neighbors Index. Currently supports + * a linear index or a kd-tree index. + */ + oneof IndexType { + LinearIndex linearIndex = 100; + SingleKdTreeIndex singleKdTreeIndex = 110; + } + + /* + * Distance function to be used to find neighbors. Currently only Squared Euclidean + * Distance is supported. + */ + oneof DistanceFunction { + SquaredEuclideanDistance squaredEuclideanDistance = 200; + } + +} + +/* + * Specifies a uniform weighting scheme (i.e. each neighbor receives equal + * voting power). + */ +message UniformWeighting { +} + + +/* + * Specifies a inverse-distance weighting scheme (i.e. closest neighbors receives higher + * voting power). A nearest neighbor with highest sum of (1 / distance) is picked. + */ +message InverseDistanceWeighting { +} + + +/* + * Specifies a flat index of data points to be searched by brute force. + */ +message LinearIndex { +} + + +/* + * Specifies a kd-tree backend for the nearest neighbors model. + */ +message SingleKdTreeIndex { + + /* + * Number of data points contained within a leaf node of the kd-tree. + */ + int32 leafSize = 1; + +} + + +/* + * Specifies the Squared Euclidean Distance function. + */ +message SquaredEuclideanDistance { +} + diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/NeuralNetwork.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/NeuralNetwork.proto new file mode 100644 index 000000000..f2bdb68c0 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/NeuralNetwork.proto @@ -0,0 +1,6531 @@ +// Copyright (c) 2017-2019, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +/* + * A neural network is defined through a collection of layers + * and represents a directed acyclic graph (DAG). + * Each layer has a name, a layer type, + * a list of input names, a list of output names, + * and a collection of parameters specific to the layer type. + * + * The graph structure and connectivity of the neural network + * is inferred from the input and output names. + * A neural network starts with the layer + * whose input name is equal to the value specified in + * ``Model.description.input.name``, + * and ends with the layer + * whose output name is equal to the value specified in + * ``Model.description.output.name``. + * Layers must have unique input and output names, + * and a layer may not have input or output names that + * refer to layers that are not yet defined. + * + * For Core ML specification version <=3, + * all inputs are mapped to static rank 5 tensors, with axis notations + * [Sequence, Batch, Channel, Height, Width]. + * + * From specification version 4 onwards (iOS >= 13, macOS >= 10.15), more options are available + * (see enums ``NeuralNetworkMultiArrayShapeMapping``, ``NeuralNetworkImageShapeMapping``) + * to map inputs to generic N-Dimensional (or N rank) tensors, where N >= 1. + * + * Each layer type may have specific constraints on the ranks of its inputs and outputs. + * + * Some of the layers (such as softmax, reduce, etc) have parameters that have been described in + * terms of notational axis "Channel", "Height", "Width" or "Sequence". They can be re-interpreted easily in + * the general ND setting by using the following rule: + * "width" is same as axis = -1 (i.e. the last axis from the end) + * "height" is same as axis = -2 (i.e. the second last axis from the end) + * "channel" is same as axis = -3 (i.e. 
the third last axis from the end) + * "sequence" is same as axis = -5 (i.e. the fifth last axis from the end) + * + * Several layers are available in 3 different variations, with the names ending + * in identifiers: ``like``, ``static`` and ``dynamic``. For instance, ``FillLike``, + * ``FillStatic`` and ``FillDynamic``. The ``static`` variation generally will have + * a property corresponding to the shape of the output. For instance, if the + * output of the ``FillStatic`` layer is desired to be of shape (10, 4), the + * property ``targetShape`` will have to be set to [10, 4]. In the ``dynamic`` case, + * the shape is an input, hence it can be changed at runtime. For instance, for + * a ``FillDynamic`` layer, the input would have to be an array containing the + * values 10 and 4, if the desired output is of shape (10, 4). Whereas in the + * ``like`` case, the additional input's shape is used as the output shape, ignoring + * its values. For instance, for a ``FillLike`` layer, for an input with shape + * (10, 4), the output generated will also be of shape (10, 4), values of the + * input will be ignored. + */ + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; +import public "Parameters.proto"; + +package CoreML.Specification; + + +enum NeuralNetworkMultiArrayShapeMapping { + + /* + * Describes how the MultiArray shape for the inputs, + * provided in Features Types proto via model description, + * is mapped to construct tensors that are fed into the Neural Network layers. + */ + + /* + * Default legacy value. Only supported for Core ML Specification version <= 3. + * + * The default legacy shape mapping resolves all input shapes to a rank 5 equivalent + * with axis notation of [Seq, Batch, Channel, Height, Width]. + * + * When this enum value is selected, + * the repeated shape field in the message "ArrayFeatureType" in feature types proto, + * must be either length 1 or length 3. 
+ * + * The following rule is used to map the values in the shape field to the actual tensor shape: + * rank 1 shape is mapped to shape [1,1,C,1,1] + * rank 3 shape is mapped to shape [1,1,C,H,W] + * At runtime, the first two dimensions (Seq or Batch) can be presented as well, with non-1 values. + * + * It is invalid to use this enum value if any of the layers added + * Specification version 4 (iOS >= 13, macOS >= 10.15) onwards are used in the network. + * Validator will raise an error in that case. + */ + RANK5_ARRAY_MAPPING = 0; + + /* + * The exact shape and rank (i.e. number of dimensions in the shape) of the input, + * as specified in the message "ArrayFeatureType", is passed through to the layers. + * Supported only for Specification version >= 4 (iOS >= 13, macOS >= 10.15). + */ + EXACT_ARRAY_MAPPING = 1; + +} + +enum NeuralNetworkImageShapeMapping { + + /* + * Describes how the shape of the input tensors is constructed from image inputs. + */ + + /* + * In this case, image input is mapped to a rank 5 tensor. + * For Color images, input tensor is shaped as [1,1,3,H,W]. + * For Gray images, input tensor is shaped as [1,1,1,H,W]. + */ + RANK5_IMAGE_MAPPING = 0; + + /* + * For Color images, input tensor is shaped as [1,3,H,W]. + * For Gray images, input tensor is shaped as [1,1,H,W]. + * Supported only for Specification version >= 4 (iOS >= 13, macOS >= 10.15). + */ + RANK4_IMAGE_MAPPING = 1; + +} + +/* + A neural network. 
+ */ +message NeuralNetwork { + + repeated NeuralNetworkLayer layers = 1; + repeated NeuralNetworkPreprocessing preprocessing = 2; + + // use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs + NeuralNetworkMultiArrayShapeMapping arrayInputShapeMapping = 5; + + // use this enum value to determine the input tensor shapes to the neural network, for image inputs + NeuralNetworkImageShapeMapping imageInputShapeMapping = 6; + + + NetworkUpdateParameters updateParams = 10; + +} + +// Preprocessing +// ------------- + +/* + * A neural network preprocessor that + * performs a scalar multiplication of an image + * followed by addition of scalar biases to the channels. + * + * Input: X + * An image in BGR or RGB format with shape ``[3, H, W]`` + * or in grayscale format with shape ``[1, H, W]``. + * Output: Y + * An image with format and shape corresponding to the input. + * + * If the input image is in BGR format: + * + * .. code:: + * + * Y[0, :, :] = channelScale * X[0, :, :] + blueBias + * Y[1, :, :] = channelScale * X[1, :, :] + greenBias + * Y[2, :, :] = channelScale * X[2, :, :] + redBias + * + * If the input image is in RGB format: + * + * .. code:: + * + * Y[0, :, :] = channelScale * X[0, :, :] + redBias + * Y[1, :, :] = channelScale * X[1, :, :] + greenBias + * Y[2, :, :] = channelScale * X[2, :, :] + blueBias + * + * If the input image is in grayscale format: + * + * .. code:: + * + * Y[0, :, :] = channelScale * X[0, :, :] + grayBias + */ +message NeuralNetworkImageScaler { + + float channelScale = 10; // Scalar to be multiplied. + float blueBias = 20; // Scalar blue bias to be added. + float greenBias = 21; // Scalar green bias to be added. + float redBias = 22; // Scalar red bias to be added. + float grayBias = 30; // Scalar bias to be added for grayscale images. + +} + +/* + * A neural network preprocessor that + * subtracts the provided mean image from the input image. 
+ * The mean image is subtracted from the input named + * ``NeuralNetworkPreprocessing.featureName``. + */ +message NeuralNetworkMeanImage { + + /* + * Mean image stored as a flattened array of floats, + * representing shape [Channel,Height,Width]. + */ + repeated float meanImage = 1; + +} + +// Preprocessing parameters for image inputs. +message NeuralNetworkPreprocessing { + + string featureName = 1; // must be equal to the input name to which the preprocessing is applied + oneof preprocessor { + NeuralNetworkImageScaler scaler = 10; + NeuralNetworkMeanImage meanImage = 11; + } + +} + +// Activation Functions +// -------------------- + +/* + * A rectified linear unit (ReLU) activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \text{max}(0, x) + */ +message ActivationReLU { + +} + +/* + * A leaky rectified linear unit (ReLU) activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \begin{cases} + * x & \text{if } x \geq 0 \\ + * \alpha x & \text{if } x < 0 + * \end{cases} + */ +message ActivationLeakyReLU { + + float alpha = 1; //negative slope value for leakyReLU + +} + +/* + * A hyperbolic tangent activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \dfrac{1 - e^{-2x}}{1 + e^{-2x}} + */ +message ActivationTanh { + +} + +/* + * A scaled hyperbolic tangent activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \alpha \tanh(\beta x) + */ +message ActivationScaledTanh { + + float alpha = 1; + float beta = 2; + +} + +/* + * A sigmoid activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \dfrac{1}{1 + e^{-x}} + */ +message ActivationSigmoid { + +} + +/* + * A linear activation function. + * + * This function has the following formula: + * + * .. 
math:: + * f(x) = \alpha x + \beta + */ +message ActivationLinear { + + float alpha = 1; + float beta = 2; + +} + +/* + * A hard sigmoid activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \text{min}(\text{max}(\alpha x + \beta, 0), 1) + */ +message ActivationSigmoidHard { + + float alpha = 1; + float beta = 2; + +} + +/* + * A parameterized rectified linear unit (PReLU) activation function. + * Input must be at least rank 3. Axis = -3 is denoted by "C", or channels. + * "alpha" parameter can be a vector of length C. + * + * This function has the following formula: + * + * .. math:: + * f(x_i) = \begin{cases} + * x_i & \text{if } x_i \geq 0 \\ + * \alpha_i x_i & \text{if } x_i < 0 + * \end{cases} \;,\;i=1,...,C + */ +message ActivationPReLU { + + // parameter of length C or 1. + // If length is 1, same value is used for all channels + WeightParams alpha = 1; + +} + +/* + * An exponential linear unit (ELU) activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \begin{cases} + * x & \text{if } x \geq 0 \\ + * \alpha (e^x - 1) & \text{if } x < 0 + * \end{cases} + */ +message ActivationELU { + + float alpha = 1; + +} + +/* + * A thresholded rectified linear unit (ReLU) activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \begin{cases} + * x & \text{if } x \geq \alpha \\ + * 0 & \text{if } x < \alpha + * \end{cases} + */ +message ActivationThresholdedReLU { + + float alpha = 1; + +} + +/* + * A softsign activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \dfrac{x}{1 + |x|} + */ +message ActivationSoftsign { + +} + +/* + * A softplus activation function. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \text{log}(1 + e^x) + */ +message ActivationSoftplus { + +} + +/* + * A parametric softplus activation function. + * Input must be at least rank 3. 
axis = -3 is denoted by "C", or channels. + * "alpha"/"beta" parameter can be a vector of length C. + * + * This function has the following formula: + * + * .. math:: + * f(x_i) = \alpha_i \text{log}(1 + e^{\beta_i x_i}) \;,\;i=1,...,C + */ +message ActivationParametricSoftplus { + + // If length is 1, same value is used for all channels + WeightParams alpha = 1; //parameter of length C or 1 + WeightParams beta = 2; //parameter of length C or 1 + +} + +message ActivationParams { + + oneof NonlinearityType { + ActivationLinear linear = 5; + + ActivationReLU ReLU = 10; + ActivationLeakyReLU leakyReLU = 15; + ActivationThresholdedReLU thresholdedReLU = 20; + ActivationPReLU PReLU = 25; + + ActivationTanh tanh = 30; + ActivationScaledTanh scaledTanh = 31; + + ActivationSigmoid sigmoid = 40; + ActivationSigmoidHard sigmoidHard = 41; + + ActivationELU ELU = 50; + + ActivationSoftsign softsign = 60; + ActivationSoftplus softplus = 70; + ActivationParametricSoftplus parametricSoftplus = 71; + } + +} + +/* + * Representation of the intermediate tensors + */ +message Tensor { + + // Number of dimensions in the tensor shape + uint32 rank = 1; + // actual value of the tensor shape. + // must be of length "rank". Can contain -1s for unknown dimensions. + repeated int64 dimValue = 2; + +} + +/* + * A single neural network layer. + */ +message NeuralNetworkLayer { + + string name = 1; //descriptive name of the layer + repeated string input = 2; + repeated string output = 3; + + repeated Tensor inputTensor = 4; // must be the same length as the "input" field + repeated Tensor outputTensor = 5; // must be the same length as the "output" field + + // Must be set to true to mark the layer as updatable. 
+ // If true, the weightParams in the layer's properties must also be set to updatable + // If false, the value of the isUpdatable parameter within the layer's weights are ignored + bool isUpdatable = 10; + + oneof layer { + + // Start at 100 here + ConvolutionLayerParams convolution = 100; + + PoolingLayerParams pooling = 120; + + ActivationParams activation = 130; + + InnerProductLayerParams innerProduct = 140; + EmbeddingLayerParams embedding = 150; + + // Normalization-related Layers + BatchnormLayerParams batchnorm = 160; + MeanVarianceNormalizeLayerParams mvn = 165; + L2NormalizeLayerParams l2normalize = 170; + SoftmaxLayerParams softmax = 175; + LRNLayerParams lrn = 180; + + CropLayerParams crop = 190; + PaddingLayerParams padding = 200; + UpsampleLayerParams upsample = 210; + + ResizeBilinearLayerParams resizeBilinear = 211; + CropResizeLayerParams cropResize = 212; + + UnaryFunctionLayerParams unary = 220; + + // Element-wise Operations + AddLayerParams add = 230; + MultiplyLayerParams multiply = 231; + + AverageLayerParams average = 240; + ScaleLayerParams scale = 245; + + BiasLayerParams bias = 250; + MaxLayerParams max = 260; + MinLayerParams min = 261; + + DotProductLayerParams dot = 270; + ReduceLayerParams reduce = 280; + LoadConstantLayerParams loadConstant = 290; + + // Data Reorganization + ReshapeLayerParams reshape = 300; + FlattenLayerParams flatten = 301; + PermuteLayerParams permute = 310; + ConcatLayerParams concat = 320; + SplitLayerParams split = 330; + SequenceRepeatLayerParams sequenceRepeat = 340; + + ReorganizeDataLayerParams reorganizeData = 345; + SliceLayerParams slice = 350; + + // Recurrent Layers + SimpleRecurrentLayerParams simpleRecurrent = 400; + GRULayerParams gru = 410; + UniDirectionalLSTMLayerParams uniDirectionalLSTM = 420; + BiDirectionalLSTMLayerParams biDirectionalLSTM = 430; + + // Custom (user-implemented) Layer + CustomLayerParams custom = 500; + + // Following layers are available only after Core ML Specification + 
// version >= 4 (iOS >= 13, macOS >= 10.15) + + // Control Flow related Layers + CopyLayerParams copy = 600; + BranchLayerParams branch = 605; + + LoopLayerParams loop = 615; + LoopBreakLayerParams loopBreak = 620; + LoopContinueLayerParams loopContinue = 625; + + RangeStaticLayerParams rangeStatic = 635; + RangeDynamicLayerParams rangeDynamic = 640; + + // Element-wise Unary Layers + ClipLayerParams clip = 660; + CeilLayerParams ceil = 665; + FloorLayerParams floor = 670; + + SignLayerParams sign = 680; + RoundLayerParams round = 685; + + Exp2LayerParams exp2 = 700; + + SinLayerParams sin = 710; + CosLayerParams cos = 715; + TanLayerParams tan = 720; + + AsinLayerParams asin = 730; + AcosLayerParams acos = 735; + AtanLayerParams atan = 740; + + SinhLayerParams sinh = 750; + CoshLayerParams cosh = 755; + TanhLayerParams tanh = 760; + + AsinhLayerParams asinh = 770; + AcoshLayerParams acosh = 775; + AtanhLayerParams atanh = 780; + + ErfLayerParams erf = 790; + GeluLayerParams gelu = 795; + + // Element-wise Binary with Broadcasting Support + EqualLayerParams equal = 815; + NotEqualLayerParams notEqual = 820; + LessThanLayerParams lessThan = 825; + LessEqualLayerParams lessEqual = 827; + GreaterThanLayerParams greaterThan = 830; + GreaterEqualLayerParams greaterEqual = 832; + + LogicalOrLayerParams logicalOr = 840; + LogicalXorLayerParams logicalXor = 845; + LogicalNotLayerParams logicalNot = 850; + LogicalAndLayerParams logicalAnd = 855; + + ModBroadcastableLayerParams modBroadcastable = 865; + MinBroadcastableLayerParams minBroadcastable = 870; + MaxBroadcastableLayerParams maxBroadcastable = 875; + AddBroadcastableLayerParams addBroadcastable = 880; + PowBroadcastableLayerParams powBroadcastable = 885; + DivideBroadcastableLayerParams divideBroadcastable = 890; + FloorDivBroadcastableLayerParams floorDivBroadcastable = 895; + MultiplyBroadcastableLayerParams multiplyBroadcastable = 900; + SubtractBroadcastableLayerParams subtractBroadcastable = 905; + + // Tensor 
Manipulations + TileLayerParams tile = 920; + StackLayerParams stack = 925; + GatherLayerParams gather = 930; + ScatterLayerParams scatter = 935; + GatherNDLayerParams gatherND = 940; + ScatterNDLayerParams scatterND = 945; + SoftmaxNDLayerParams softmaxND = 950; + GatherAlongAxisLayerParams gatherAlongAxis = 952; + ScatterAlongAxisLayerParams scatterAlongAxis = 954; + + ReverseLayerParams reverse = 960; + ReverseSeqLayerParams reverseSeq = 965; + + SplitNDLayerParams splitND = 975; + ConcatNDLayerParams concatND = 980; + TransposeLayerParams transpose = 985; + + SliceStaticLayerParams sliceStatic = 995; + SliceDynamicLayerParams sliceDynamic = 1000; + SlidingWindowsLayerParams slidingWindows = 1005; + + TopKLayerParams topK = 1015; + ArgMinLayerParams argMin = 1020; + ArgMaxLayerParams argMax = 1025; + + EmbeddingNDLayerParams embeddingND = 1040; + BatchedMatMulLayerParams batchedMatmul = 1045; + + // Tensor Allocation / Reshape-related Operations + GetShapeLayerParams getShape = 1065; + LoadConstantNDLayerParams loadConstantND = 1070; + + FillLikeLayerParams fillLike = 1080; + FillStaticLayerParams fillStatic = 1085; + FillDynamicLayerParams fillDynamic = 1090; + + BroadcastToLikeLayerParams broadcastToLike = 1100; + BroadcastToStaticLayerParams broadcastToStatic = 1105; + BroadcastToDynamicLayerParams broadcastToDynamic = 1110; + + SqueezeLayerParams squeeze = 1120; + ExpandDimsLayerParams expandDims = 1125; + FlattenTo2DLayerParams flattenTo2D = 1130; + ReshapeLikeLayerParams reshapeLike = 1135; + ReshapeStaticLayerParams reshapeStatic = 1140; + ReshapeDynamicLayerParams reshapeDynamic = 1145; + RankPreservingReshapeLayerParams rankPreservingReshape = 1150; + + ConstantPaddingLayerParams constantPad = 1155; + + // Random Distributions + RandomNormalLikeLayerParams randomNormalLike = 1170; + RandomNormalStaticLayerParams randomNormalStatic = 1175; + RandomNormalDynamicLayerParams randomNormalDynamic = 1180; + + RandomUniformLikeLayerParams randomUniformLike = 
1190; + RandomUniformStaticLayerParams randomUniformStatic = 1195; + RandomUniformDynamicLayerParams randomUniformDynamic = 1200; + + RandomBernoulliLikeLayerParams randomBernoulliLike = 1210; + RandomBernoulliStaticLayerParams randomBernoulliStatic = 1215; + RandomBernoulliDynamicLayerParams randomBernoulliDynamic = 1220; + + CategoricalDistributionLayerParams categoricalDistribution = 1230; + + // Reduction-related Layers: + ReduceL1LayerParams reduceL1 = 1250; + ReduceL2LayerParams reduceL2 = 1255; + ReduceMaxLayerParams reduceMax = 1260; + ReduceMinLayerParams reduceMin = 1265; + ReduceSumLayerParams reduceSum = 1270; + ReduceProdLayerParams reduceProd = 1275; + ReduceMeanLayerParams reduceMean = 1280; + ReduceLogSumLayerParams reduceLogSum = 1285; + ReduceSumSquareLayerParams reduceSumSquare = 1290; + ReduceLogSumExpLayerParams reduceLogSumExp = 1295; + + // Masking / Selection Layers + WhereNonZeroLayerParams whereNonZero = 1313; + MatrixBandPartLayerParams matrixBandPart = 1315; + LowerTriangularLayerParams lowerTriangular = 1320; + UpperTriangularLayerParams upperTriangular = 1325; + WhereBroadcastableLayerParams whereBroadcastable = 1330; + + // Normalization Layers + LayerNormalizationLayerParams layerNormalization = 1350; + + NonMaximumSuppressionLayerParams NonMaximumSuppression = 1400; + + // Following layers are available only after Core ML Specification + // version >= 5 (iOS >= 14, macOS >= 11.0) + OneHotLayerParams oneHot = 1450; + CumSumLayerParams cumSum = 1455; + ClampedReLULayerParams clampedReLU = 1460; + ArgSortLayerParams argSort = 1461; + Pooling3DLayerParams pooling3d = 1465; + GlobalPooling3DLayerParams globalPooling3d = 1466; + SliceBySizeLayerParams sliceBySize = 1470; + Convolution3DLayerParams convolution3d = 1471; + + } + +} + +/* + * Branching Layer + * + * A layer that provides the functionality of branching or an If-Else block. + * + * Must have 1 input. 
There are no outputs as the execution is transferred to either the + * if or the else branch based on the value of the input. + * + * Input is the condition predicate. Must be a scalar (length 1 tensor). + * + */ +message BranchLayerParams { + + /* + * execute this graph if the absolute value of the input Tensor is greater than 1e-6 + * This must be present. + */ + NeuralNetwork ifBranch = 1; + /* + * execute this graph if the absolute value of the input Tensor is less than 1e-6 + * This is optional. + */ + NeuralNetwork elseBranch = 2; + +} + +/* + * Loop Layer + * + * A layer that provides the functionality of a "for" loop or a "while" loop. + * + * There are either no inputs or 1 input. When an input is present, it corresponds to the maximum loop count, + * in that case the value of the "maxLoopIterations" field is ignored. Input must be a scalar. + * (For description below, maxLoopIterations is assumed to be the value of the input, when its present) + * + * No outputs are produced. Blobs produced by the condition or the body network are visible in the scope of the overall network. + * + * "conditionNetwork" must produce a tensor with the name specified in the "conditionVar" field. + * + * There are 3 possible cases for determining the termination condition: + * + * Case 1: + * + * If there is no "conditionNetwork", in this case the layer corresponds to a pure for loop, which is run "maxLoopIterations" number of times. + * Equivalent pseudo-code: + * + * for loopIterator = 0 : maxLoopIterations + * bodyNetwork() + * + * + * Case 2: + * + * "conditionNetwork" is present, and "maxLoopIterations" is 0 and there is no input, + * in this case the layer corresponds to a while loop. 
Equivalent pseudo-code: + * + * conditionVar = conditionNetwork() + * while conditionVar: + * bodyNetwork() + * conditionVar = conditionNetwork() + * + * + * Case 3: + * + * "conditionNetwork" is provided, and "maxLoopIterations" is positive or there is an input, + * in this case the layer corresponds to a while loop with a joint condition. Equivalent pseudo-code: + * + * loopIterator = 0 + * conditionVar = conditionNetwork() + * while (conditionVar and loopIterator < maxLoopIterations): + * bodyNetwork() + * loopIterator = loopIterator + 1 + * conditionVar = conditionNetwork() + * + */ +message LoopLayerParams { + + /* + * maximum number of iterations. Ignored if input is present. + */ + uint64 maxLoopIterations = 1; + /* + * This field provides the name of the tensor which is produced by the conditionNetwork + * and whose value is checked to start/continue/terminate the loop. Value close to 0.0f is treated as False. + * This field is optional. + * Must be a non empty string if and only if "conditionNetwork" is present. + */ + string conditionVar = 2; + /* + * Must generate a tensor with the name provided in the "conditionVar" field. + * This field is optional. + * Must be present if and only if "conditionVar" field is a non empty string. + */ + NeuralNetwork conditionNetwork = 3; + /* + * Body of the loop. + * This field must be present. + */ + NeuralNetwork bodyNetwork = 4; + +} + +/* + * Loop break Layer + * + * Terminate the loop that has this layer. + * If present, it should always reside in the "bodyNetwork" of the loop layer + * + * No inputs/outputs + * + */ +message LoopBreakLayerParams { + +} + +/* + * Loop Continue Layer + * + * Stop the current loop iteration and continue on the next iteration. + * If present, it should always reside in the "bodyNetwork" of the loop layer + * + * No inputs/outputs + * + */ +message LoopContinueLayerParams { + +} + +/* + * Copy Layer + * + * A layer that copies its input tensor to the output tensor. 
+ * Must have 1 input and 1 output, with distinct names. + * This is the only layer that is allowed to re-generate an output that is already present in the neural network prior to this layer, + * in which case it will overwrite the output tensor. + * + */ +message CopyLayerParams { + +} + +/* + * GreaterThan Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise greater than operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = x1 > x2 + * or + * y = x1 > alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message GreaterThanLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 2; + +} + +/* + * GreaterEqual Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise greater equal operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = x1 >= x2 + * or + * y = x1 >= alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message GreaterEqualLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 2; + +} + +/* + * LessThan Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise less than operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = x1 < x2 + * or + * y = x1 < alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message LessThanLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 2; + +} + +/* + * LessEqual Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise less equal operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. 
code:: + * + * y = x1 <= x2 + * or + * y = x1 <= alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message LessEqualLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 2; + +} + +/* + * Equal Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise equal operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = x1 == x2 + * or + * y = x1 == alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message EqualLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 1; + +} + +/* + * NotEqual Layer + * + * Either 1 or 2 inputs. + * Produces 1 output. + * Perform elementwise not equal operation. + * + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = x1 != x2 + * or + * y = x1 != alpha, if only one input is provided + * + * Broadcasting is supported. + * + */ +message NotEqualLayerParams { + + /* + * Compare to the scalar value provided here if there is 1 input + */ + float alpha = 1; + +} + +/* + * LogicalAnd Layer + * + * Must have 2 inputs, produces 1 output. + * Perform elementwise logical AND operation. + * + * Input is considered False if equal to 0.0f otherwise True. + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = AND(x1, x2) + * + * Broadcasting is supported. + * + */ +message LogicalAndLayerParams { + +} + +/* + * LogicalOr Layer + * + * Must have 2 inputs, produces 1 output. + * Perform elementwise logical OR operation. + * + * Input is considered False if equal to 0.0f otherwise True. + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = OR(x1, x2) + * + * Broadcasting is supported. + * + */ +message LogicalOrLayerParams { + +} + +/* + * LogicalXor Layer + * + * Must have 2 inputs, produces 1 output. 
+ * Perform elementwise logical XOR operation. + * + * Input is considered False if equal to 0.0f otherwise True. + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = XOR(x1, x2) + * + * Broadcasting is supported. + * + */ +message LogicalXorLayerParams { + +} + +/* + * LogicalNot Layer + * + * Must have 1 input, produces 1 output. + * Perform elementwise logical NOT operation. + * + * Input is considered False if equal to 0.0f otherwise True. + * Output is 1.0f if the condition is true otherwise 0.0f. + * + * .. code:: + * + * y = NOT(x) + * + * + */ +message LogicalNotLayerParams { + +} + +// Border Amounts +// -------------- + +/* + * Specifies the amount of spatial border to be either padded or cropped. + * + * For padding: + * + * .. code:: + * + * H_out = borderAmounts[0].startEdgeSize + H_in + borderAmounts[0].endEdgeSize + * W_out = borderAmounts[1].startEdgeSize + W_in + borderAmounts[1].endEdgeSize + * + * topPaddingAmount == Height startEdgeSize + * bottomPaddingAmount == Height endEdgeSize + * leftPaddingAmount == Width startEdgeSize + * rightPaddingAmount == Width endEdgeSize + * + * For cropping: + * + * .. code:: + * + * H_out = (-borderAmounts[0].startEdgeSize) + H_in + (-borderAmounts[0].endEdgeSize) + * W_out = (-borderAmounts[1].startEdgeSize) + W_in + (-borderAmounts[1].endEdgeSize) + * + * topCropAmount == Height startEdgeSize + * bottomCropAmount == Height endEdgeSize + * leftCropAmount == Width startEdgeSize + * rightCropAmount == Width endEdgeSize + */ +message BorderAmounts { + + message EdgeSizes { + /* + * The amount to be padded or cropped from the beginning. + */ + uint64 startEdgeSize = 1; + + /* + * The amount to be padded or cropped from the end. + */ + uint64 endEdgeSize = 2; + } + + /* + * The border amounts. + * This must be length 2 in the order ``[H, W]``. 
+ */ + repeated EdgeSizes borderAmounts = 10; + +} + +/* + * Specifies the type of padding to be used with Convolution/Deconvolution and Pooling layers. + * After padding, input spatial shape: ``[H_in, W_in]``, gets modified to the + * output spatial shape ``[H_out, W_out]``. + * + * .. code:: + * + * topPaddingAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize + * bottomPaddingAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize + * leftPaddingAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize + * rightPaddingAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize + * + * With Convolution or Pooling: + * + * .. code:: + * + * H_out = int_division_round_down((H_in + topPaddingAmount + bottomPaddingAmount - KernelSize[0]),stride[0]) + 1 + * + * which is same as: + * + * .. code:: + * + * H_out = int_division_round_up((H_in + topPaddingAmount + bottomPaddingAmount - KernelSize[0] + 1),stride[0]) + * + * With Deconvolution: + * + * .. code:: + * + * H_out = (H_in-1) * stride[0] + kernelSize[0] - (topPaddingAmount + bottomPaddingAmount) + * + * + * The equivalent expressions hold true for ``W_out`` as well. + * + * + * By default, the values of ``paddingAmounts`` are set to ``0``, + * which results in a "true" valid padding. + * If non-zero values are provided for ``paddingAmounts``, + * "valid" convolution/pooling is performed within the spatially expanded input. + * + */ +message ValidPadding { + + BorderAmounts paddingAmounts = 1; + +} + +/* + * Specifies the type of padding to be used with Convolution/Deconvolution and pooling layers. + * After padding, input spatial shape: ``[H_in, W_in]``, gets modified to the + * output spatial shape ``[H_out, W_out]``. + * With Convolution or pooling: + * + * .. code:: + * + * H_out = int_division_round_up(H_in,stride[0]) + * W_out = int_division_round_up(W_in,stride[1]) + * + * This is achieved by using the following padding amounts: + * + * .. 
code:: + * + * totalPaddingHeight = max(0,(H_out-1) * stride[0] + KernelSize[0] - Hin) + * totalPaddingWidth = max(0,(W_out-1) * stride[1] + KernelSize[1] - Win) + * + * There are two modes of asymmetry: + * ``BOTTOM_RIGHT_HEAVY``, and ``TOP_LEFT_HEAVY``. + * + * If the mode is ``BOTTOM_RIGHT_HEAVY``: + * + * .. code:: + * + * topPaddingAmount = floor(totalPaddingHeight / 2) + * bottomPaddingAmount = totalPaddingHeight - topPaddingAmount + * leftPaddingAmount = floor(totalPaddingWidth / 2) + * rightPaddingAmount = totalPaddingWidth - leftPaddingAmount + * + * If the mode is ``TOP_LEFT_HEAVY``: + * + * .. code:: + * + * bottomPaddingAmount = floor(totalPaddingHeight / 2) + * topPaddingAmount = totalPaddingHeight - bottomPaddingAmount + * rightPaddingAmount = floor(totalPaddingWidth / 2) + * leftPaddingAmount = totalPaddingWidth - rightPaddingAmount + * + * + * With Deconvolution: + * + * .. code:: + * + * H_out = H_in * stride[0] + * W_out = W_in * stride[1] + */ +message SamePadding { + + enum SamePaddingMode { + + BOTTOM_RIGHT_HEAVY = 0; + TOP_LEFT_HEAVY = 1; + + } + SamePaddingMode asymmetryMode = 1; + +} + +/* + * Specifies how grid points are sampled from an interval. + * Without the loss of generality, assume the interval to be [0, X-1] from which N points are to be sampled. + * Here X may correspond to an input image's height or width. + * All the methods can be expressed in terms of numpy's linspace function, along with the constraint that grid points have to lie in the interval [0, X-1]. + * Note: numpy.linspace(start = start, end = end, num = N, endpoint = True) corresponds to sampling + * N points uniformly from the interval [start, end], endpoints included. + * The methods vary in how the ``start`` and ``end`` values are computed. 
+ */ +message SamplingMode { + + enum Method { + + /* + * start = 0, end = X-1 + * grid points = numpy.linspace(start, end) + */ + STRICT_ALIGN_ENDPOINTS_MODE = 0; + + /* + * if N == 1: start = end = (X-1)/2 + * otherwise, start = 0, end = X-1 + * grid points = numpy.linspace(start, end) + */ + ALIGN_ENDPOINTS_MODE = 1; + + /* + * start = 0, end = X - X/N + * grid points = min(X-1, numpy.linspace(start, end)) + * This is same as the mode used in the upsample layer in this specification, when used with bilinear interpolation. In that case N/X = upsample ratio. + */ + UPSAMPLE_MODE = 2; + + /* + * spacing = max(1, X-1)/N + * start = 0.5 * spacing + * end = start + (N-1) * spacing + * grid points = min(X-1, numpy.linspace(start, end)) + */ + ROI_ALIGN_MODE = 3; + + } + + Method samplingMethod = 1; + +} + +/* + * Specifies the convention used to specify four bounding box coordinates for an image of size (Height, Width). + * The (0,0) coordinate corresponds to the top-left corner of the image. + */ +message BoxCoordinatesMode { + + enum Coordinates { + + /* + * [h_start, w_start, h_end, w_end] + */ + CORNERS_HEIGHT_FIRST = 0; + + /* + * [w_start, h_start, w_end, h_end] + */ + CORNERS_WIDTH_FIRST = 1; + + /* + * [h_center, w_center, box_height, box_width] + */ + CENTER_SIZE_HEIGHT_FIRST = 2; + + /* + * [w_center, h_center, box_width, box_height] + */ + CENTER_SIZE_WIDTH_FIRST = 3; + + } + + Coordinates boxMode = 1; + +} + +/* + * Weights for layer parameters. + * Weights are stored as repeated floating point numbers + * using row-major ordering + * and can represent 1-, 2-, 3-, or 4-dimensional data. + */ +message WeightParams { + + /* + * Values specified in single / float / FP32 precision. + */ + repeated float floatValue = 1; + + /* + * Values in 16-bit half precision floating point. + */ + bytes float16Value = 2; + + /* + * Raw value specification for quantized lower precisions. 
+ * + * This field is interpreted as uintN, where N is the number of bits in quantization. + * E.g. if n=8, the field is interpreted as an array of UINT8. + * Use this field for quantized parameters unless specifically noted to use + * int8RawValue. + */ + bytes rawValue = 30; + + /* + * Field to be used if int8DynamicQuantize is set in the parent layer. + * Cannot be set if rawValue is also set. + * The values in this field are interpreted as INT8. + * + * If this field is set, following conditions must hold true: + * * QuantizationType == LinearQuantizationParams, such that + * * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams" + */ + bytes int8RawValue = 31; + + /* + * Quantization related parameters. + */ + QuantizationParams quantization = 40; + + bool isUpdatable = 50; + +} + +/* + * Quantization parameters. + */ +message QuantizationParams { + + uint64 numberOfBits = 1; + oneof QuantizationType { + LinearQuantizationParams linearQuantization = 101; + LookUpTableQuantizationParams lookupTableQuantization = 102; + } + +} + +message LinearQuantizationParams { + + /* + * Stores scale and bias values corresponding to the quantized weights. + * Must be an array of 1 element, or an array of C elements, where C + * is number of output channels. For recurrent layers it is equal to + * the output vector size. + * + * Relationship between quantized weights, unquantized weights, scale and bias: + * + * W_unquantized = W_quantized * scale + bias + * + */ + repeated float scale = 1; + repeated float bias = 2; + +} + +message LookUpTableQuantizationParams { + + /* Stores look-up table quantization values. Must be an array of + (2^numberOfBits) Elements. + */ + repeated float floatValue = 1; + +} + +// Layers +// ------ + +/* + * A layer that performs spatial convolution or deconvolution. + * + * .. code:: + * + * y = ConvolutionLayer(x) + * + * Requires 1 or 2 inputs and produces 1 output. 
+ * + * Input + * First Input: + * A blob with rank greater than or equal to 4. + * Rank 4 blob represents [Batch, channels, height, width]. + * For ranks greater than 4, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * From Core ML specification version 4 onwards (iOS >= 13, macOS >= 10.15). + * convolution layer can have 2 inputs, in which case the second input is + * the blob representing the weights. This is allowed when "isDeconvolution" = False. + * The weight blob should have shape + * ``[outputChannels, kernelChannels, kernelHeight, kernelWidth]``, + * where kernelChannels == inputChannels / nGroups. + * + * Output + * Rank is same as the input. e.g.: for rank 4 input, output shape is [B, C_out, H_out, W_out] + * + * + * If ``dilationFactor`` is not 1, effective kernel size is + * modified as follows: + * + * .. code:: + * + * KernelSize[0] <-- (kernelSize[0]-1) * dilationFactor[0] + 1 + * KernelSize[1] <-- (kernelSize[1]-1) * dilationFactor[1] + 1 + * + * Type of padding can be ``valid`` or ``same``. Output spatial dimensions depend on the + * the type of padding. For details, refer to the descriptions of the messages "ValidPadding" + * and "SamePadding". Padded values are all zeros. + * + * For Deconvolution, ``ConvolutionPaddingType`` (``valid`` or ``same``) is ignored when ``outputShape`` is set. + * + * + */ +message ConvolutionLayerParams { + + /* + * The number of kernels. + * Same as ``C_out`` used in the layer description. + */ + uint64 outputChannels = 1; + + /* + * Channel dimension of the kernels. + * Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False + * Must be equal to ``inputChannels``, if isDeconvolution == True + */ + uint64 kernelChannels = 2; + + /* + * Group convolution, i.e. weight reuse along channel axis. + * Input and kernels are divided into g groups + * and convolution / deconvolution is applied within the groups independently. 
+ * If not set or 0, it is set to the default value 1. + */ + uint64 nGroups = 10; + + /* + * Must be length 2 in the order ``[H, W]``. + * If not set, default value ``[3, 3]`` is used. + */ + repeated uint64 kernelSize = 20; + + /* + * Must be length 2 in the order ``[H, W]``. + * If not set, default value ``[1, 1]`` is used. + */ + repeated uint64 stride = 30; + + /* + * Must be length 2 in order ``[H, W]``. + * If not set, default value ``[1, 1]`` is used. + * It is ignored if ``isDeconvolution == true``. + */ + repeated uint64 dilationFactor = 40; + + /* + * The type of padding. + */ + oneof ConvolutionPaddingType { + ValidPadding valid = 50; + SamePadding same = 51; + } + + /* + * Flag to specify whether it is a deconvolution layer. + */ + bool isDeconvolution = 60; + + /* + * Flag to specify whether a bias is to be added or not. + */ + bool hasBias = 70; + + /* + * Weights associated with this layer. + * If convolution (``isDeconvolution == false``), weights have the shape + * ``[outputChannels, kernelChannels, kernelHeight, kernelWidth]``, where kernelChannels == inputChannels / nGroups + * If deconvolution (``isDeconvolution == true``) weights have the shape + * ``[kernelChannels, outputChannels / nGroups, kernelHeight, kernelWidth]``, where kernelChannels == inputChannels + */ + WeightParams weights = 90; + WeightParams bias = 91; // Must be of size [outputChannels]. + + /* + * The output shape, which has length 2 ``[H_out, W_out]``. + * This is used only for deconvolution (``isDeconvolution == true``). + * If not set, the deconvolution output shape is calculated + * based on ``ConvolutionPaddingType``. + */ + repeated uint64 outputShape = 100; + +} + +/* + * A layer that performs a 3-dimensional convolution. + * + * .. code:: + * + * y = Convolution3DLayer(x) + * + * Input + * A blob of rank 5. + * The input blob's shape should be ``[batch, channels, depth, height, width]``. 
+ * + * Fields + * The bias field, if set, should have shape of ``[channelsOut]``. + * + * Output + * A blob of rank 5. + * The output blob's shape is ``[batch, channelsOut, depthOut, heightOut, widthOut]``. + * + * Type of padding can be ``custom``, ``valid``, or ``same``. Padded values are all zeros. + * Output spatial dimensions depend on the the type of padding. For details, refer to the + * descriptions of the ``PaddingType`` field of this ``Convolution3DLayerParams`` message. + * + * Example + * For example, given an input of size ``[1, 3, 3, 8, 8]``, a stride of 2 in each dimension, + * a kernel of 3 in each dimension, 2 output channels, and ``same`` padding, this layer will + * compute the total padding applied in the depth, height, and width dimensions to be 2, 1, and 1, + * respectively. The depth padding is even and will be applied equally to both sides of the depth + * dimension. Since the height and width padding values are odd, they'll be applied to the + * bottom/right of the height/width dimensions. Thus, the padding applied to the input will be + * ``[1, 1, 0, 1, 0, 1]`` (front, back, top, bottom, left, right). Finally, the output produced + * will have size ``[1, 2, 2, 4, 4]``. + * + */ +message Convolution3DLayerParams { + + /* + * The number of channels in the output (channelsOut). Must be a positive integer. + */ + int32 outputChannels = 1; + + /* + * The number of channels in the input (channels). Must be a positive integer. + */ + int32 inputChannels = 2; + + /* + * Group convolution, i.e., weight reuse along the channel axis. + * It must evenly divide both the number of input and output channels and be at most the number + * of input channels (a depthwise convolution). + * Input and kernels are divided into g groups and convolution is applied within the groups + * independently. + */ + int32 nGroups = 10; + + /* Depth of the convolution kernel. Must be a positive integer. 
+ */ + int32 kernelDepth = 20; + + /* Height of the convolution kernel. Must be a positive integer. + */ + int32 kernelHeight = 21; + + /* Width of the convolution kernel. Must be a positive integer. + */ + int32 kernelWidth = 22; + + /* Stride along the depth direction. Must be a positive integer. + */ + int32 strideDepth = 31; + + /* Stride along the height direction. Must be a positive integer. + */ + int32 strideHeight = 32; + + /* Stride along the width direction. Must be a positive integer. + */ + int32 strideWidth = 33; + + /* Dilation along the depth direction. Must be a positive integer. + */ + int32 dilationDepth = 40; + + /* Dilation along the height direction. Must be a positive integer. + */ + int32 dilationHeight = 41; + + /* Dilation along the width direction. Must be a positive integer. + */ + int32 dilationWidth = 42; + + /* + * Flag to specify whether a bias is to be added or not. + * If false, then no bias is added. + */ + bool hasBias = 50; + + /* + * Weights associated with this layer. + * Weights have the shape + * if deconvolution == False + * ``[outputChannels, kernelChannels, kernelDepth, kernelHeight, kernelWidth]``, where + * kernelChannels == inputChannels / nGroups + * else if deconvolution == True + * ``[outputChannels / nGroups, kernelChannels, kernelDepth, kernelHeight, kernelWidth]``, where + */ + WeightParams weights = 60; + + /* + * Must be of size ``[outputChannels]``. + */ + WeightParams bias = 61; + + + /* + * The type of padding. + * All padding types pad the input shape with zeros. + * CUSTOM padding will add the custom padding values specified below to their respective + * dimensions, e.g., `customPaddingFront` number of zeros will be added to one side of the + * input's depth dimension and `customPaddingBack` number of zeros will be added to the other + * side of the input's depth dimension. + * VALID padding adds no padding to any dimension. 
In this case, the last convolution along + * each dimension will be dropped if the input dimension and the kernel size, stride, and + * dilation do not match. + * SAME padding adds enough padding to each dimension such that the output of the convolution + * has size ``Ceiling(inputShape / stride)``. Padding is added evenly to both sides of each + * dimension unless the total padding to add is odd, in which case it is added to the + * back/bottom/right side of the respective dimension. For example, if the total padding needed + * in the depth dimension is 3, 1 zero will be added to the front side of the depth dimension + * and 2 zeros will be added to the back side. + */ + enum PaddingType { + CUSTOM = 0; + VALID = 1; + SAME = 2; + } + PaddingType paddingType = 70; + + /* Padding before the input in the depth direction. Must be zero or a positive integer. + * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingFront = 80; + + /* Padding after the input in the depth direction. Must be zero or a positive integer. + * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingBack = 81; + + /* Padding before the input in the height direction. Must be zero or a positive integer. + * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingTop = 82; + + /* Padding after the input in the height direction. Must be zero or a positive integer. + * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingBottom = 83; + + /* Padding before the input in the width direction. Must be zero or a positive integer. + * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingLeft = 84; + + /* Padding after the input in the width direction. Must be zero or a positive integer. 
+ * Used when the `PaddingType` is `CustomPadding`, otherwise ignored by other padding types. + */ + int32 customPaddingRight = 85; + + /* Flag to specify if this is Convolution Transpose or not. + */ + bool isDeconvolution = 86; + + /* + * The output shape, which has length 3 ``[D_out, H_out, W_out]``. + * This is used only for deconvolution (``isDeconvolution == true``). + * If not set, the deconvolution output shape is calculated + * based on ``PaddingType``. + */ + repeated uint64 outputShape = 87; + +} + +/* + * A layer that performs a matrix-vector or matrix-matrix product. + * This is equivalent to a fully-connected, or dense layer. + * The weight parameters correspond to a matrix of dimensions (inputChannels, outputChannels) i.e. (C_in, C_out) + * + * .. code:: + * + * y = InnerProductLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * Input can have rank 1 to rank 5. This is how it is reshaped in to the matrix (for rank > 1): + * rank 1 (x1) : in this case, the layer corresponds to a matrix-vector product. x1 must be equal to C_in + * rank 2 (x1, x2): x2 must be equal to C_in + * rank 3 (x1, x2, x3) --> (x1 * x2, x3). x3 must be equal to C_in + * rank 4 (x1, x2, x3, x4) ---> (x1, x2 * x3 * x4). x2 * x3 * x4 must be equal to C_in + * rank 5 (x1, x2, x3, x4, x5) ---> (x1 * x2, x3 * x4 * x5). x3 * x4 * x5 must be equal to C_in + * + * Output + * Output rank is same as the input rank + * rank 1: (C_out) + * rank 2: (x1, C_out) + * rank 3: (x1, x2, C_out) + * rank 4: (x1, C_out, 1, 1) + * rank 5: (x1, x2, C_out, 1, 1) + * + */ +message InnerProductLayerParams { + + uint64 inputChannels = 1; // Input size: C_in. + uint64 outputChannels = 2; // Output size: C_out. + + bool hasBias = 10; // Whether a bias is added or not. + + WeightParams weights = 20; // Weight matrix [C_out, C_in]. + WeightParams bias = 21; // Bias vector [C_out]. 
+ + /* + * If set, this layer, at runtime, quantizes the floating point input blob to int8 before applying an + * inner product using INT8 weight matrix parameters, as provided in weights->int8RawValue. The + * result is then dequantized. + * Requires: + * * hasBias == false + * * QuantizationType == LinearQuantizationParams, such that + * * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams" + * * numberOfBits == 8 + * * weights->rawValue_size to be empty + */ + bool int8DynamicQuantize = 22; + +} + +/* + * A layer that performs a matrix lookup and optionally adds a bias. + * The weights matrix is stored with dimensions [outputChannels, inputDim]. + * + * .. code:: + * + * y = EmbeddingLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * Input values must be in the range ``[0, inputDim - 1]``. + * + * Input must have rank equal to 4 or 5, such that the last 3 dimensions are all 1. + * rank 4: shape (x1, 1, 1, 1). x1 is effectively the batch/sequence length. + * rank 5: shape (x1, x2 , 1, 1, 1). x1 * x2 is effectively the combined batch/sequence length. + * + * Output + * Output rank is same as the input rank. Please see input description above. + * rank 4: shape (x1, outputChannels, 1, 1) + * rank 5: shape (x1, x2, outputChannels, 1, 1) + * + */ +message EmbeddingLayerParams { + + uint64 inputDim = 1; // Size of the input dictionary. + uint64 outputChannels = 2; // Size of the output vectors. + + bool hasBias = 10; // Whether a bias is added or not. + + WeightParams weights = 20; // 2-D weights of dimensions [outputChannels, inputDim]. + WeightParams bias = 21; // Bias of size [outputChannels]. + +} + +/* + * A layer that performs a matrix lookup and optionally adds a bias. + * The weights matrix is stored with dimensions [embeddingSize, vocabSize]. + * + * .. code:: + * + * y = EmbeddingNDLayer(x) + * + * Requires 1 input and produces 1 output. 
+ * + * Input + * Input values must be in the range ``[0, vocabSize - 1]``. + * Input must have rank at least 2. The last dimension must always be 1. + * rank 2: shape (x1, 1). x1 is the batch/sequence length. + * rank 3: shape (x1, x2, 1). x1 * x2 is effectively the combined batch/sequence length. + * rank 4: shape (x1, x2, x3, 1). x1 * x2 * x2 is effectively the combined batch/sequence length. + * rank 5: shape (x1, x2 , x3, x4, 1). x1 * x2 * x3 * x4 is effectively the combined batch/sequence length. + * + * Output + * Output rank is same as the input rank. Please see input description above. + * rank 2: shape (x1, embeddingSize) + * rank 3: shape (x1, x2, embeddingSize) + * rank 4: shape (x1, x2, x3, embeddingSize) + * rank 5: shape (x1, x2, x3, x4, embeddingSize) + * + */ +message EmbeddingNDLayerParams { + + uint64 vocabSize = 1; // Size of the input dictionary. + uint64 embeddingSize = 2; // Size of the output vectors. + bool hasBias = 3; // Whether a bias is added or not. + WeightParams weights = 20; // 2-D weights of dimensions [embeddingSize, vocabSize]. + WeightParams bias = 21; // Bias of size [embeddingSize]. + +} + +/* + * A layer that performs batch normalization, + * which is performed along axis = -3, + * and repeated along the other axes, if present. + * + * .. code:: + * + * y = BatchnormLayer(x) + * + * Requires 1 input and produces 1 output. + * + * This operation is described by the following formula: + * + * .. math:: + * y_i = \gamma_i \dfrac{ (x_i - \mu_i)}{\sqrt{\sigma_i^2 + \epsilon}} + \beta_i \;,\;i=1,....,C + * + * Input + * A blob with rank greater than equal to 3. + * Example: Rank 4 blob represents [Batch, channels, height, width] + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * A blob with the same shape as the input. + */ +message BatchnormLayerParams { + + uint64 channels = 1; // Size of the channel dimension in the input. 
+ + /* + * If ``computeMeanVar == true``, + * the mean and variance are calculated from either + * the single input instance, if ``instanceNormalization == true``, + * or the whole batch, if ``instanceNormalization = false``. + * and the values provided in parameters "mean" and "variance" are ignored. + */ + bool computeMeanVar = 5; + bool instanceNormalization = 6; + + /* + * A small constant to avoid division by 0 while normalizing by variance. + * Defaults to ``1e-5`` if not set or set to ``0``. + */ + float epsilon = 10; + + WeightParams gamma = 15; // Parameter of length [channels] + WeightParams beta = 16; // Parameter of length [channels] + WeightParams mean = 17; // Parameter of length [channels] + WeightParams variance = 18; // Parameter of length [channels] + +} + +/* + * A spatial pooling layer. + * + * .. code:: + * + * y = PoolingLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank greater than equal to 4. + * Rank 4 blob represents [Batch, channels, height, width] + * For ranks greater than 4, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * Rank is same as the input. e.g.: for rank 4 input, output shape is [B, C, H_out, W_out] + * + * Padding options are similar to ``ConvolutionLayerParams`` + * with the additional option of ``ValidCompletePadding`` (``includeLastPixel``), + * which ensures that the last application of the kernel + * always includes the last pixel of the input image, if there is padding. + * + * .. code:: + * + * H_out = ceil(float(H_in + 2 * paddingAmounts[0] - kernelSize[0])/float(Stride[0])) + 1 + * if (paddingAmounts[0] > 0 or paddingAmounts[1] > 0) + * if ((H_out - 1) * Stride >= H_in + paddingAmounts[0]) { + * H_out = H_out - 1 + * } + * } + * + * The equivalent expressions hold true for ``W_out`` as well. + * Only symmetric padding is supported with this option. 
+ */ +message PoolingLayerParams { + + enum PoolingType { + + MAX = 0; + AVERAGE = 1; + L2 = 2; + + } + PoolingType type = 1; // Type of pooling operation. + + /* + * Must be length 2 in the order ``[H, W]``. + * If not set, default value ``[3, 3]`` is used. + */ + repeated uint64 kernelSize = 10; + + /* + * Must be length 2 in the order ``[H, W]``. + * If not set, default value ``[1, 1]`` is used. + */ + repeated uint64 stride = 20; + + message ValidCompletePadding { + + /* + * Must be length 2 in order ``[H, W]``. + * If not set, value ``[0, 0]`` is used. + */ + repeated uint64 paddingAmounts = 10; + + } + + oneof PoolingPaddingType { + ValidPadding valid = 30; + SamePadding same = 31; + ValidCompletePadding includeLastPixel = 32; + } + + /* + * If true, padded values are excluded from the count (denominator) + * when computing average pooling. + */ + bool avgPoolExcludePadding = 50; + + /* + * If true, global pooling is performed. + * Kernel size is inferred from the input data spatial dimensions. + */ + bool globalPooling = 60; + +} + +/* + * A layer to pool three spatial dimensions + * + * Input + * A blob with rank equal to 5, representing [Batch, channels, depth, height, width]. + * + * Output + * Rank is same as the input: A blob with rank equal to 5, representing [Batch, channels, depth, height, width]. + * + * Requires 1 input and produces 1 output. 
+ * + * For example, given an input of shape (1,1,2,3,3): + * +----+----+----+ + * / | 10 | 11 | 12 | + * / +----+----+----+ + * / | 13 | 14 | 15 | + * / +----+----+----+ + * / | 16 | 17 | 18 | + * / +----+----+----+ + * +----+----+----+ / + * | 1 | 2 | 3 | / + * +----+----+----+ / + * | 4 | 5 | 6 | / + * +----+----+----+ / + * | 7 | 8 | 9 | / + * +----+----+----+ + * + * And applying MAX pooling using: + * Kernel: 2x2x2 + * Stride: 1x1x1 + * Valid Padding + * We expect to get an output with shape: (1,1,1,2,2) and value: + * +----+----+ + * | 14 | 15 | + * +----+----+ + * | 17 | 18 | + * +----+----+ + */ +message Pooling3DLayerParams { + + enum PoolingType3D { + MAX = 0; + AVERAGE = 1; + } + + // Whether to use Max or Average + PoolingType3D type = 1; + + // Depth of the pooling region. + int32 kernelDepth = 2; + + // Height of the pooling region. + int32 kernelHeight = 3; + + // Width of the pooling region. + int32 kernelWidth = 4; + + // Stride along the depth direction + int32 strideDepth = 5; + + // Stride along the height direction + int32 strideHeight = 6; + + // Stride along the width direction + int32 strideWidth = 7; + + /* + * The type of padding. + * All padding types pad the input shape with zeros. + * CUSTOM padding will add the custom padding values specified below to their respective + * dimensions, e.g., `customPaddingFront` number of zeros will be added to one side of the + * input's depth dimension and `customPaddingBack` number of zeros will be added to the other + * side of the input's depth dimension. + * VALID padding adds no padding to any dimension. In this case, the last pool along + * each dimension will be dropped if the input dimension and the kernel size, and stride do not match. + * SAME padding adds enough padding to each dimension such that the output + * has the same spatial dimensions as the input. 
Padding is added evenly to both
+ * sides of each dimension unless the total padding to add is odd, in which case the extra padding
+ * is added to the back/bottom/right side of the respective dimension. For example, if the
+ * total horizontal padding is 3, then there will be 1 padding on the left, and 2 padding on the right.
+ */
+ enum Pooling3DPaddingType {
+ CUSTOM = 0;
+ VALID = 1;
+ SAME = 2;
+ }
+ Pooling3DPaddingType paddingType = 15;
+
+ // Padding before the input in the depth direction.
+ int32 customPaddingFront = 8;
+
+ // Padding after the input in the depth direction.
+ int32 customPaddingBack = 9;
+
+ // Padding before the input in the height direction.
+ int32 customPaddingTop = 10;
+
+ // Padding after the input in the height direction.
+ int32 customPaddingBottom = 11;
+
+ // Padding before the input in the width direction.
+ int32 customPaddingLeft = 12;
+
+ // Padding after the input in the width direction.
+ int32 customPaddingRight = 13;
+
+ // If true, exclude zeros from padding in Average pooling. Meaningless in Max Pooling.
+ bool countExcludePadding = 14;
+}
+
+/*
+ * A layer to pool three spatial dimensions down to one value.
+ * This behaves like a special case of Pooling3DLayerParams in which
+ * the Kernel is the size of the input and there is no padding.
+ *
+ * Input
+ * A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
+ *
+ * Output
+ * Rank is same as the input: A blob with rank equal to 5, representing [Batch, channels, depth, height, width].
+ * Depth, height, and width of the output will always be 1.
+ *
+ * Requires 1 input and produces 1 output.
+ * + * For example, given an input of shape (1,1,2,3,3): + * +----+----+----+ + * / | 10 | 11 | 12 | + * / +----+----+----+ + * / | 13 | 14 | 15 | + * / +----+----+----+ + * / | 16 | 17 | 18 | + * / +----+----+----+ + * +----+----+----+ / + * | 1 | 2 | 3 | / + * +----+----+----+ / + * | 4 | 5 | 6 | / + * +----+----+----+ / + * | 7 | 8 | 9 | / + * +----+----+----+ + * + * And applying MAX global 3d pooling, we expect to get an output with shape: (1,1,1,1,1) and value: + * +----+ + * | 18 | + * +----+ + */ +message GlobalPooling3DLayerParams { + + enum GlobalPoolingType3D { + MAX = 0; + AVERAGE = 1; + } + + // Whether to use Max or Average + GlobalPoolingType3D type = 1; +} + +/* + * A layer that performs padding along spatial dimensions. + * + * .. code:: + * + * y = PaddingLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank at least 2. + * e.g.: blob with shape ``[H_in, W_in]``. + * For ranks greater than 2, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch + * i.e. Padding is applied on last two dimensions. + * + * Output + * Same rank as the input. + * e.g.: blob with shape ``[H_out, W_out]``. + * + * Output dimensions are calculated as follows: + * + * .. code:: + * + * H_out = H_in + topPaddingAmount + bottomPaddingAmount + * W_out = W_in + leftPaddingAmount + rightPaddingAmount + * + * topPaddingAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize + * bottomPaddingAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize + * leftPaddingAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize + * rightPaddingAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize + * + * There are three types of padding: + * + * - ``PaddingConstant``, which fills a constant value at the border. + * - ``PaddingReflection``, which reflects the values at the border. + * - ``PaddingReplication``, which replicates the values at the border. 
+ * + * Given the following input: + * + * .. code:: + * + * [1, 3, 4] : 1 2 3 4 + * 5 6 7 8 + * 9 10 11 12 + * + * Here is the output of applying the padding + * ``(top=2, left=2, bottom=0, right=0)`` + * with each of the supported types: + * + * - ``PaddingConstant`` (``value = 0``): + * .. code:: + * + * [1, 5, 6] : 0 0 0 0 0 0 + * 0 0 0 0 0 0 + * 0 0 1 2 3 4 + * 0 0 5 6 7 8 + * 0 0 9 10 11 12 + * + * - ``PaddingReflection``: + * .. code:: + * + * [1, 5, 6] : 11 10 9 10 11 12 + * 7 6 5 6 7 8 + * 3 2 1 2 3 4 + * 7 6 5 6 7 8 + * 11 10 9 10 11 12 + * + * - ``PaddingReplication``: + * .. code:: + * + * [1, 5, 6] : 1 1 1 2 3 4 + * 1 1 1 2 3 4 + * 1 1 1 2 3 4 + * 5 5 5 6 7 8 + * 9 9 9 10 11 12 + */ +message PaddingLayerParams { + + /* + * Fill a constant value in the padded region. + */ + message PaddingConstant { + float value = 1; + } + + /* + * Reflect the values at the border for padding. + */ + message PaddingReflection { + } + + /* + * Replicate the values at the border for padding. + */ + message PaddingReplication { + } + + oneof PaddingType { + PaddingConstant constant = 1; + PaddingReflection reflection = 2; + PaddingReplication replication = 3; + } + + BorderAmounts paddingAmounts = 10; // Amounts to be padded to the input. + +} + +/* + * A layer that concatenates along the axis = -3 or -5. + * For general concatenation along any axis, see ConcatNDLayer. + * + * .. code:: + * + * y = ConcatLayer(x1,x2,....) + * + * Requires more than 1 input and produces 1 output. + * + * Input + * All input blobs must have same rank. + * If "sequenceConcat" = False, rank must be greater than equal to 3. In this case concatenation is along axis = -3 + * If "sequenceConcat" = True, rank must be greater than equal to 5. In this case concatenation is along axis = -5 + * + * Output + * Same rank as the input. + * + */ +message ConcatLayerParams { + + /* + * If true, concatenate along the axis = -5 instead of axis = -3. 
+ */ + bool sequenceConcat = 100; + +} + +/* + * A layer that performs local response normalization (LRN). + * + * .. code:: + * + * y = LRNLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank greater than equal to 3. + * Example: Rank 4 blob represents [Batch, channels, height, width] + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * Output + * A blob with the same shape as the input. + * + * This layer is described by the following formula: + * + * .. math:: + * x_i \leftarrow \dfrac{x_i}{\left ( k + \dfrac{\alpha}{\text{localSize}} \sum_j x_j^2 \right )^\beta} + * + * where the summation is done over a ``(localSize, 1, 1)`` neighborhood --- + * that is, over a window "across" channels in 1x1 spatial neighborhoods. + */ +message LRNLayerParams { + + float alpha = 1; + float beta = 2; + uint64 localSize = 3; // Number of channels in the normalization window. + float k = 4; // Defaults to 1 if not set or 0. Must be strictly positive. + +} + +/* + * Softmax Normalization Layer + * + * A layer that performs softmax normalization. + * Normalization is applied along axis = -3 or N-3 (where N is the rank of the input) + * For softmax layer that can operate on any axis, see SoftmaxNDLayer. + * + * + * .. code:: + * + * y = SoftmaxLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * Must be a blob with rank >= 3. + * Output + * A blob with the same shape as the input. + * + * This layer is described by the following formula: + * + * .. math:: + * x_i \leftarrow \dfrac{e^{x_i}}{\sum_i{e^{x_i}}} + */ +message SoftmaxLayerParams { + +} + +/* + * A layer that uniformly splits across axis = -3 to produce a specified number of outputs. + * For general split operation along any axis, see SplitNDLayer. + * + * .. code:: + * + * (y1,y2,...yN) = SplitLayer(x), where N = nOutputs + * + * Requires 1 input and produces multiple outputs. 
+ * + * Input + * A blob with rank at least 3. + * e.g.: blob with shape ``[C, H, W]`` + * Output + * ``nOutputs`` blobs each with same rank as the input. + * e.g.: For input that is of shape ``[C, H, W]``, output shapes will be ``[C/nOutputs, H, W]`` + */ +message SplitLayerParams { + + uint64 nOutputs = 1; // The number of outputs. + +} + +/* + * A layer that performs elementwise addition. + * This layer has limited broadcasting support. For general broadcasting see AddBroadcastableLayer. + * + * .. code:: + * + * y = AddLayer(x1,x2,...) + * + * Requires 1 or more than 1 input and produces 1 output. + * + * Input + * In general, there are no rank constraints. + * However, only certain set of shapes are broadcastable. For example: + * [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W] + * Output + * A blob with shape equal to the input blob. + * + * If only one input is provided, scalar addition is performed: + * + * .. math:: + * y = x + \alpha + * + */ +message AddLayerParams { + + /* + * Scalar to be added to the input. + * Only used if there is a single input. + */ + float alpha = 1; + +} + +/* + * A layer that performs elementwise multiplication. + * This layer has limited broadcasting support. For general broadcasting see MultiplyBroadcastableLayer. + * + * .. code:: + * + * y = MultiplyLayer(x1,x2,...) + * + * Requires 1 or more than 1 input and produces 1 output. + * + * Input + * In general, there are no rank constraints. + * However, only certain set of shapes are broadcastable. For example: + * [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W] + * Output + * A blob with shape equal to the first input blob. + * + * If only one input is provided, scalar multiplication is performed: + * + * .. math:: + * y = \alpha x + * + */ +message MultiplyLayerParams { + + /* + * Scalar to be multiplied with the input. + * Only used if there is a single input. + */ + float alpha = 1; + +} + +/* + * A layer that applies a unary function. + * + * .. 
code:: + * + * y = UnaryFunctionLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with no rank constraints. + * Output + * A blob with the same shape as the input. + * + * The input is first modified by shifting and scaling: + * + * .. math:: + * x \leftarrow \text{scale} \cdot x + \text{shift} + */ +message UnaryFunctionLayerParams { + + /* + * A unary operator. + * + * The following functions are supported: + * + * ``SQRT`` + * .. math:: f(x) = \sqrt{x} + * + * ``RSQRT`` + * .. math:: f(x) = \dfrac{1}{\sqrt{x + \epsilon}} + * + * ``INVERSE`` + * .. math:: f(x) = \dfrac{1}{x + \epsilon} + * + * ``POWER`` + * .. math:: f(x) = x^\alpha + * + * ``EXP`` + * .. math:: f(x) = e^x + * + * ``LOG`` + * .. math:: f(x) = \log x + * + * ``ABS`` + * .. math:: f(x) = |x| + * + * ``THRESHOLD`` + * .. math:: f(x) = \text{max}(\alpha, x) + */ + enum Operation { + SQRT = 0; + RSQRT = 1; + INVERSE = 2; + POWER = 3; + EXP = 4; + LOG = 5; + ABS = 6; + THRESHOLD = 7; + } + Operation type = 1; // The type of unary function. + + /* + * A constant used in ``POWER`` and ``THRESHOLD`` functions. + */ + float alpha = 2; + + /* + * A small constant to avoid division by 0 while normalizing variance. + * Defaults to ``1e-6`` if not set or set to ``0``. + */ + float epsilon = 3; + + /* + * Input is shifted by this amount + * before the unary function is applied. + * Defaults to ``0.0`` if not set. + */ + float shift = 4; + + /* + * Input is scaled by this amount + * before the unary function is applied. + * Defaults to ``1.0`` if not set or set to ``0``. + */ + float scale = 5; + +} + +/* + * A layer that scales up spatial dimensions. + * It supports two modes: nearest neighbour (default) and bilinear. + * + * .. code:: + * + * y = UpsampleLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank at least 3. + * e.g.: blob with shape ``[C, H, W]``. 
+ * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * Same rank as the input. + * e.g.: blob with shape ``[C, scalingFactor[0] * H, scalingFactor[1] * W]`` + */ +message UpsampleLayerParams { + + /* + * Scaling Factor. Mutually exclusive with fractionalScalingFactor. + * Must be length 2 in order ``[H, W]``. + * If not set, default value ``[1, 1]`` is used. + */ + repeated uint64 scalingFactor = 1; + + /* + * Fractional scaling factor. Mutually exclusive with scalingFactor. + * Must be length 2 in order ``[H, W]``. + * If not set, default value ``[1.0, 1.0]`` is used. + */ + repeated float fractionalScalingFactor = 7; + + /* + * Overall mode for interpolating new elements when upsampling. + * NN - Nearest Neighbors - simply pick the nearest true value for interpolated values. + * BILINEAR - Use bilinear interpolation. See LinearUpsamplingMode for behavior. + */ + enum InterpolationMode { + + NN = 0; // Nearest Neighbour + BILINEAR = 1; // Bilinear + + } + + InterpolationMode mode = 5; + + /* + * LinearUpsampleMode specifies the behavior for linear upsampling. Only valid when Interpolation Mode is BILINEAR. 
+ * If input grid is [0, Xin-1] (corresponding to an input size of Xin), and if the output size is Xout, + * then the grid points are sampled in the following manner: + * DEFAULT: + * spacing = (Xin-Xin/Xout) / (Xout-1) + * grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,….,Xout-1 + * ALIGN_CORNERS_TRUE: + * spacing = (Xin-1) / (Xout-1) + * grid_point[i] = min(Xin-1, max(0, i * spacing)), for i = 0,1,2,….,Xout-1 + * ALIGN_CORNERS_FALSE: + * spacing = Xin / Xout + * grid_point[i] = min(Xin-1, max(0, i * spacing + 0.5 * spacing - 0.5)), for i = 0,1,2,….,Xout-1 + */ + enum LinearUpsampleMode { + + DEFAULT = 0; + ALIGN_CORNERS_TRUE = 1; + ALIGN_CORNERS_FALSE = 2; + + } + + LinearUpsampleMode linearUpsampleMode = 6; + +} + +/* +* A layer that resizes the input to a pre-specified spatial size using bilinear interpolation. +* +* .. code:: +* +* y = ResizeBilinearLayer(x) +* +* Requires 1 input and produces 1 output. +* +* Input +* A blob with rank at least 3. +* e.g.: blob with shape ``[C, H_in, W_in]``. +* For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. +* +* Output +* Same rank as the input. +* e.g.: blob with shape ``[C, H_out, W_out]``. +* +*/ +message ResizeBilinearLayerParams { + + /* + * Target Spatial Size. + * Must be length 2 in order ``[Height, Width]``, i.e. ``[H_out, W_out]``. + * If not set, default value ``[1, 1]`` is used. + */ + repeated uint64 targetSize = 1; + + /* + * Mode used to compute the grid on which the spatial output values are evaluated. + * Same mode is applied to both the height and width axes. + */ + SamplingMode mode = 2; + +} + +/* +* A layer that extracts cropped spatial patches or RoIs (regions of interest) from the input and resizes them to a pre-specified size using +* bilinear interpolation. +* Note that RoI Align layer can be implemented with this layer followed by a pooling layer. +* +* .. 
code:: +* +* y = CropResizeLayer(x) +* +* Requires 2 inputs and produces 1 output. +* +* Input +* There are two inputs. +* First input represents an image feature map. +* Second input represents the bounding box coordinates for N patches or RoIs (region of interest). +* +* First input is rank 5: [1, Batch, C, H_in, W_in]. +* Second input is rank 5. Its shape can be either [N, 1, 4, 1, 1] or [N, 1, 5, 1, 1]. +* +* N: number of patches/RoIs to be extracted +* +* If RoI shape = ``[N, 1, 4, 1, 1]`` +* The axis=-3 corresponds to the four coordinates specifying the bounding box. +* All the N RoIs are extracted from all the batches of the input. +* +* If RoI shape = ``[N, 1, 5, 1, 1]`` +* The first element of the axis=-3 specifies the input batch id from which to extract the RoI and +* must be in the interval ``[0, Batch - 1]``. That is, n-th RoI is extracted from the RoI[n,0,0,0,0]-th +* input batch id. The last four elements of the axis=-3 specify the bounding box coordinates. +* +* Output +* A blob with rank 5. +* - Shape is [N, Batch, C, H_out, W_out] if input RoI shape is [N, 1, 4, 1, 1] +* - Shape is [N, 1, C, H_out, W_out] if input RoI shape is [N, 1, 5, 1, 1] +* +*/ +message CropResizeLayerParams { + + /* + * Target Spatial Size. + * Must be length 2 in order ``[Height, Width]``, i.e. ``[H_out, W_out]``. + * If not set, default value ``[1, 1]`` is used. + */ + repeated uint64 targetSize = 1; + + /* + * If true the bounding box coordinates must be in the interval [0, 1]. + * They are scaled by (H_in - 1), (W_in - 1), i.e. based on the input spatial dimensions. + * If false the bounding box coordinates must be in the interval + * [0, H_in -1] and [0, W_in - 1], respectively for height and width dimensions. + */ + bool normalizedCoordinates = 2; + + /* + * Mode used to compute the grid on which the spatial output values are evaluated. + * Same mode is applied to both the height and width axes. 
+ */
+ SamplingMode mode = 3;
+
+ /*
+ * Representation used to express the bounding box coordinates.
+ * It determines how the values of the second input are interpreted.
+ */
+ BoxCoordinatesMode boxIndicesMode = 4;
+
+ /*
+ * Additional spatial scale that multiplies the bounding box coordinates.
+ * Generally used while implementing the RoI Align layer,
+ * which uses unnormalized RoI coordinates along with a spatial scale less than or equal to 1.
+ */
+ float spatialScale = 5;
+
+}
+
+/*
+ * A layer that performs elementwise addition of a bias,
+ * which is broadcasted to match the input shape.
+ *
+ * .. code::
+ *
+ * y = BiasLayer(x)
+ *
+ * Requires 1 input and produces 1 output.
+ *
+ * Input
+ * A blob with rank at least 3.
+ * e.g.: blob with shape ``[C, H, W]``.
+ * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
+ * Output
+ * A blob with the same shape as the input.
+ */
+message BiasLayerParams {
+
+ /*
+ * The shape of the bias.
+ * Must be one of the following:
+ * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
+ */
+ repeated uint64 shape = 1;
+
+ /*
+ * The bias values.
+ * The size must be equal to the product of the ``shape`` dimensions.
+ */
+ WeightParams bias = 2;
+
+}
+
+/*
+ * A layer that performs elementwise multiplication by a scale factor
+ * and optionally adds a bias;
+ * both the scale and bias are broadcasted to match the input shape.
+ *
+ * .. code::
+ *
+ * y = ScaleLayer(x)
+ *
+ * Requires 1 input and produces 1 output.
+ *
+ * Input
+ * A blob with rank at least 3.
+ * e.g.: blob with shape ``[C, H, W]``.
+ * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
+ * Output
+ * A blob with the same shape as the input.
+ */
+message ScaleLayerParams {
+
+ /*
+ * The shape of the scale.
+ * Must be one of the following:
+ * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
+ */
+ repeated uint64 shapeScale = 1;
+
+ /*
+ * The scale values.
+ * The size must be equal to the product of the ``shape`` dimensions.
+ */
+ WeightParams scale = 2; // Scale values. Size must be equal to the product of dimensions specified in shapeScale.
+
+ bool hasBias = 3; // If true, a bias is added after scaling.
+
+ /*
+ * The shape of the bias.
+ * Must be one of the following:
+ * ``[1]``, ``[C]``, ``[1, H, W]`` or ``[C, H, W]``.
+ */
+ repeated uint64 shapeBias = 4;
+
+ /*
+ * The bias values.
+ * The size must be equal to the product of the ``shape`` dimensions.
+ */
+ WeightParams bias = 5;
+
+}
+
+/*
+ * A layer that loads data as a parameter and provides it as an output.
+ * The output is rank 5. For general rank, see LoadConstantNDLayer.
+ *
+ * .. code::
+ *
+ * y = LoadConstantLayer()
+ *
+ * Requires no input and produces 1 output.
+ *
+ * Output:
+ * A blob with rank 5 and shape ``[1, 1, C, H, W]``
+ */
+message LoadConstantLayerParams {
+
+ /*
+ * The shape of the constant to be loaded,
+ * which must be ``[C, H, W]``, that is length 3.
+ */
+ repeated uint64 shape = 1;
+
+ /*
+ * The data values,
+ * of size ``C * H * W``.
+ */
+ WeightParams data = 2;
+
+}
+
+/*
+ * A layer that performs L2 normalization, i.e. divides by
+ * the square root of the sum of squares of all elements of input.
+ *
+ * .. code::
+ *
+ * y = L2NormalizeLayer(x)
+ *
+ * Requires 1 input and produces 1 output.
+ *
+ * Input
+ * A blob with rank greater than equal to 3.
+ * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch.
+ * Output
+ * A blob with the same shape as the input.
+ *
+ * This layer is described by the following formula:
+ *
+ * .. math::
+ * x_i \leftarrow \dfrac{x_i}{\sqrt{\sum{x_i^2} + \epsilon}}
+ */
+message L2NormalizeLayerParams {
+
+ /*
+ * A small constant to avoid division by 0 while normalizing variance.
+ * Defaults to ``1e-6`` if not set or set to ``0``.
+ */ + float epsilon = 1; + +} + +// Data Reorganization Layers +// -------------------------- + +/* + * A layer that flattens the input. + * + * .. code:: + * + * y = FlattenLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank greater than equal to 3. + * e.g.: Rank 4 blob represents [Batch, C, H, W] + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * Output + * Same rank as the input, such that last two dimensions are both 1. + * e.g.: For rank 4 input, output shape is ``[Batch, C * H * W, 1, 1]`` + * + * There are two X orders: ``CHANNEL_FIRST`` and ``CHANNEL_LAST``. + * ``CHANNEL_FIRST`` does not require data to be rearranged, + * because row major ordering is used by internal storage. + * ``CHANNEL_LAST`` requires data to be rearranged. + */ +message FlattenLayerParams { + + enum FlattenOrder { + + CHANNEL_FIRST = 0; + CHANNEL_LAST = 1; + + } + FlattenOrder mode = 1; + +} + +/* + * A layer that recasts the input into a new shape. + * + * .. code:: + * + * y = ReshapeLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank 5. + * e.g.: ``[1, 1, C, H, W]`` or ``[Seq, 1, C, H, W]``. + * Output + * A blob with rank 5. + * e.g.: ``[1, 1, C_out, H_out, W_out]`` or ``[Seq_out, 1, C_out, H_out, W_out]``. + * + * There are two reshape orders: ``CHANNEL_FIRST`` and ``CHANNEL_LAST``. + * ``CHANNEL_FIRST`` is equivalent to + * flattening the input to ``[Seq, 1, C * H * W, 1, 1]`` in channel first order + * and then reshaping it to the target shape; + * no data rearrangement is required. + * ``CHANNEL_LAST`` is equivalent to + * flattening the input to ``[Seq, 1, H * W * C, 1, 1]`` in channel last order, + * reshaping it to ``[Seq_out, 1, H_out, W_out, C_out]`` (it is now in "H_out-major"" order), + * and then permuting it to ``[C_out, H_out, W_out]``; + * both the flattening and permuting requires the data to be rearranged. 
+ */ +message ReshapeLayerParams { + + /* + * The shape of the output. + * Must be of length 3 or 4. + * If set to 3, ``targetShape`` is interpreted as + * ``[1, 1, C_out, H_out, W_out]``, and sequence length of the input is preserved. + * If set to 4, ``targetShape`` is interpreted as + * ``[Seq_out, 1, C_out, H_out, W_out]``, + * where ``Seq_out`` is the new sequence length. + */ + repeated int64 targetShape = 1; + + enum ReshapeOrder { + + CHANNEL_FIRST = 0; + CHANNEL_LAST = 1; + + } + ReshapeOrder mode = 2; + +} + +/* + * A layer that rearranges the dimensions and data of an input. + * For generic transpose/permute operation see TransposeLayer. + * + * .. code:: + * + * y = PermuteLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * Must be a rank 5 blob. + * e.g.: shape ``[Seq, B, C, H, W]``. + * Output + * Rank 5 blob. Transposed version of the input, such that dimensions at axis=1 or axis=-4 is unchanged. + * + * + * Examples: + * + * Assume input shape is [Seq, B, C, H, W] + * + * - If ``axis`` is set to ``[0, 3, 1, 2]``, + * then the output has shape ``[Seq, B, W, C, H]`` + * + * - If ``axis`` is set to ``[3, 1, 2, 0]``, + * then the output has shape ``[W, B, C, H, Seq]`` + * + * - If ``axis`` is set to ``[0, 3, 2, 1]``, + * then the output has shape ``[Seq, B, W, H, C]`` + * + * - If ``axis`` is not set, or is set to ``[0, 1, 2, 3]``, + * the output is the same as the input. + */ +message PermuteLayerParams { + + /* + * The order in which to permute the dimensions. + * Must have length 4 and a permutation of ``[0, 1, 2, 3]``. + */ + repeated uint64 axis = 1; + +} + +/* + * A layer that reorganizes data in the input in specific ways. + * + * .. code:: + * + * y = ReorganizeDataLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank at least 3. + * e.g.: blob with shape ``[C, H, W]``. + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. 
+ * Output + * Same rank as the input. + * e.g.: blob with shape ``[C_out, H_out, W_out]``. + * + * mode == SPACE_TO_DEPTH + * ``[C_out, H_out, W_out]`` : ``[C * blockSize * blockSize, H/blockSize, W/blockSize]``. + * blockSize must divide H and W. + * Data is moved from the spatial dimensions to the channel dimension. Input is spatially divided into + * non-overlapping blocks of size blockSize X blockSize and data from each block is moved into the + * channel dimension. + * + * mode == DEPTH_TO_SPACE + * ``[C_out, H_out, W_out]`` : ``[C/(blockSize * blockSize), H * blockSize, W * blockSize]``. + * Square of blockSize must divide C. + * Reverse of SPACE_TO_DEPTH. Data is moved from the channel dimension to the spatial dimensions. + * + * mode == PIXEL_SHUFFLE + * ``[C_out, H_out, W_out]`` : ``[C/(blockSize * blockSize), H * blockSize, W * blockSize]``. + * Square of blockSize must divide C. + * Similar to DEPTH_TO_SPACE, but using the pixel-shuffle semantics for channel order in the output space. + * In both modes, elements along the channel dimension are collapsed into + * blocks in the spatial dimensions. The difference is in the arrangement of + * the input-channels' data in the output space. See below example for more + * detail. + * (Only available in Core ML Specification >= 5 (iOS >= 14, macOS >= 11.0) + * + * + * Examples: + * + * Assume input is the following [C = 8, H = 1, W = 2] tensor: + * + * .. code:: + * + * [[[1 2]] [[3 4]] [[5 6]] [[7 8]] [[9 10]] [[11 12]] [[13 14]] [[15 16]]] + * + * If block_size == 2 and mode == DEPTH_TO_SPACE, output will be the following + * [C = 2, H = 2, W = 4] tensor: + * + * .. code:: + * + * [[[ 1 5 2 6] + * [ 9 13 10 14]] + * + * [[ 3 7 4 8] + * [11 15 12 16]]] + * + * For mode == SPACE_TO_DEPTH, the behavior is the same as mode == + * DEPTH_TO_SPACE, but with the input and output swapped. + * + * If block_size == 2 and mode == PIXEL_SHUFFLE, output will be the following + * [C = 2, H = 2, W = 4] tensor: + * + * .. 
code:: + * + * [[[ 1 3 2 4] + * [ 5 7 6 8]] + * + * [[ 9 11 10 12] + * [13 15 14 16]]] + * + */ +message ReorganizeDataLayerParams { + + enum ReorganizationType { + + SPACE_TO_DEPTH = 0; + DEPTH_TO_SPACE = 1; + PIXEL_SHUFFLE = 2; + + } + ReorganizationType mode = 1; + uint64 blockSize = 2; // must be greater than 1 + +} + +/* + * A layer that slices the input data along axis = -1 or -2 or -3. + * For general slice along any axis, please see SliceStaticLayer/SliceDynamicLayer. + * + * .. code:: + * + * y = SliceLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob that can, in general, have any rank. However, depending on the value of "axis" , + * there may be additional rank constraints. + * Output + * A blob with the same rank as the input. + * + * Sliced section is taken from the interval ``[startIndex, endIndex)``, i.e. + * startIndex is inclusive while endIndex is exclusive. + * stride must be positive and represents the step size for slicing. + * Negative indexing is supported for startIndex and endIndex. + * -1 denotes N-1, -2 denotes N-2 and so on, where N is the length of the dimension to be sliced. + * + */ +message SliceLayerParams { + + int64 startIndex = 1; // start of the sliced section. Inclusive. + int64 endIndex = 2; // end of sliced section. Exclusive. + uint64 stride = 3; // The step size. Must be positive. + + enum SliceAxis { + + CHANNEL_AXIS = 0; + HEIGHT_AXIS = 1; + WIDTH_AXIS = 2; + + } + // The following mapping is used for interpreting this parameter: + // CHANNEL_AXIS => axis = -3, input must have rank at least 3. + // HEIGHT_AXIS => axis = -2, input must have rank at least 2. + // WIDTH_AXIS => axis = -1 + SliceAxis axis = 4; + +} + +/* + * A layer that reduces the input using a specified operation. + * + * .. code:: + * + * y = ReduceLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob that can, in general, have any rank. 
However, depending on the value of "axis" , + * there may be additional rank constraints. + * Output + * A blob with the same rank as the input, which has 1s on the dimensions specified in the parameter "axis" + * + * Values supported for axis are [-1], [-2], [-3], [-2,-1], [-3,-2,-1] + * and the equivalent positive values (depending on the rank of the input) + * For mode == 'ArgMax', axis must be [-1] or [-2] or [-3]. + */ +message ReduceLayerParams { + + /* + * The following reduction operations are supported + * and are applied on the specified axis of the input array: + * + * ``SUM`` + * Sum of all elements + * + * .. math:: \sum{x_i} + * + * ``AVG`` + * Sum of all elements divided by the number of elements + * + * .. math:: \dfrac{\sum^n{x_i}}{n} + * + * ``PROD`` + * Product of all elements + * + * .. math:: \prod{x_i} + * + * ``LOGSUM`` + * Sum of the natural logarithm of all elements + * + * .. math:: \sum{\ln{(x_i + \epsilon)}} + * + * ``SUMSQUARE`` + * Sum of squares of all elements + * + * .. math:: \sum{x^2} + * + * ``L1`` + * L1 normalization of all elements + * + * .. math:: ||x||_1 = \sum{|x_i|} + * + * ``L2`` + * L2 normalization of all elements + * + * .. math:: ||x||_2 = \sqrt{\sum{x_i^2}} + * + * ``MAX`` + * Maximum of all elements + * + * .. math:: \text{max}(x_i) + * + * ``MIN`` + * Minimum of all elements + * + * .. math:: \text{min}(x_i) + * + * ``ARGMAX`` + * Argument of the maximum of all elements + * + * .. math:: \text{argmax}(x_i) + * + */ + enum ReduceOperation { + + SUM = 0; + AVG = 1; + PROD = 2; + LOGSUM = 3; + SUMSQUARE = 4; + L1 = 5; + L2 = 6; + MAX = 7; + MIN = 8; + ARGMAX = 9; // only supported with axis = C, H or W. + + } + ReduceOperation mode = 1; // Specifies function used to reduce. + + /* + * Used if mode is ``LOGSUM``. + * Defaults to ``1e-6`` if not set or is set to ``0``. 
+ */ + float epsilon = 2; + + enum ReduceAxis { + + CHW = 0; + HW = 1; + C = 2; + H = 3; + W = 4; + + } + + // The following mapping is used for interpreting this parameter: + // CHW = axis [-3, -2, -1], input must have rank at least 3. + // HW = axis [-2, -1], input must have rank at least 2. + // C = axis [-3] + // H = axis [-2] + // W = axis [-1] + ReduceAxis axis = 3; + +} + +/* + * A layer that crops the spatial dimensions of an input. + * If two inputs are provided, the shape of the second input is used as the reference shape. + * + * .. code:: + * + * y = CropLayer(x1) or y = CropLayer(x1,x2) + * + * Requires 1 or 2 inputs and produces 1 output. + * + * Input + * 1 or 2 tensors, each with rank at least 3, both inputs must have equal rank. + * Example: + * - 1 input case: A blob with shape ``[C, H_in, W_in]``. + * - 2 input case: 1st blob with shape ``[C, H_in, W_in]``, 2nd blob with shape ``[C, H_out, W_out]``. + * + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * Same rank as the inputs. + * e.g.: A blob with shape ``[C, H_out, W_out]``. + * + * If one input is used, output is computed as follows: + * + * .. code:: + * + * y = x1[:, topCropAmount:H_in - bottomCropAmount, leftCropAmount:W_in - rightCropAmount] + * + * topCropAmount == Height startEdgeSize == borderAmounts[0].startEdgeSize + * bottomCropAmount == Height endEdgeSize == borderAmounts[0].endEdgeSize + * leftCropAmount == Width startEdgeSize == borderAmounts[1].startEdgeSize + * rightCropAmount == Width endEdgeSize == borderAmounts[1].endEdgeSize + * + * H_out = H_in - topCropAmount - bottomCropAmount + * W_out = W_in - leftCropAmount - rightCropAmount + * + * If two inputs are used, output is computed as follows: + * + * .. code:: + * + * y = x1[:, offset[0]:offset[0] + H_out, offset[1]:offset[1] + W_out] + */ +message CropLayerParams { + + /* + * The amounts to be cropped from the input. 
+ * Used only if a single input is provided. + */ + BorderAmounts cropAmounts = 1; + + /* + * The offset amounts. + * Used only if two inputs are provided. + * Must be of length 2, in order ``[H, W]``. + */ + repeated uint64 offset = 5; + +} + +/* + * A layer that computes the elementwise average of the inputs. + * This layer has limited broadcasting support. For general broadcasting see AddBroadcastableLayer. + * + * .. code:: + * + * y = AverageLayer(x1,x2,...) + * + * Requires multiple inputs and produces 1 output. + * + * Input + * In general, there are no rank constraints. + * However, only certain set of shapes are broadcastable. For example: + * [B, 1, 1, 1], [B, C, 1, 1], [B, 1, H, W], [B, C, H, W] + * Output + * A blob with the same shape as each input. + */ +message AverageLayerParams { + +} + +/* + * A layer that computes the elementwise maximum over the inputs. + * + * .. code:: + * + * y = MaxLayer(x1,x2,...) + * + * Requires multiple inputs and produces 1 output. + * + * Input + * In general, there are no rank constraints. + * However, only certain set of shapes are broadcastable. For example: + * [B, C, 1, 1], [B, C, H, W] + * Output + * A blob with the same shape as each input. + */ +message MaxLayerParams { + +} + +/* + * A layer that computes the elementwise minimum over the inputs. + * + * .. code:: + * + * y = MinLayer(x1,x2,...) + * + * Requires multiple inputs and produces 1 output. + * + * Input + * In general, there are no rank constraints. + * However, only certain set of shapes are broadcastable. For example: + * [B, C, 1, 1], [B, C, H, W] + * Output + * A blob with the same shape as each input. + */ +message MinLayerParams { + +} + +/* + * A layer that computes the dot product of two vectors. + * + * .. code:: + * + * y = DotProductLayer(x1,x2) + * + * Requires 2 inputs and produces 1 output. + * + * Input + * Two blobs with rank at least 3, such that the last two dimensions must be 1. + * e.g.: blobs with shape ``[B, C, 1, 1]``. 
+ * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * Same rank as the input. + * e.g. for rank 4 inputs, output shape: [B, 1, 1, 1] + */ +message DotProductLayerParams { + + /* + * If true, inputs are normalized first, + * thereby computing the cosine similarity. + */ + bool cosineSimilarity = 1; + +} + +/* + * A layer that performs mean variance normalization, along axis = -3. + * + * .. code:: + * + * y = MeanVarianceNormalizeLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank greater than equal to 3. + * Example: Rank 4 blob represents [Batch, channels, height, width] + * For ranks greater than 3, the leading dimensions, starting from 0 to -4 (inclusive), are all treated as batch. + * + * Output + * A blob with the same shape as the input. + * + * If ``acrossChannels == true`` + * normalization is performed on flattened input, i.e. the input is reshaped to (Batch,C), where "Batch" contains + * all dimensions from 0 to -4 (inclusive), and C contains dimensions -1, -2, -3. + * + * If ``acrossChannels == false`` + * normalization is performed within a channel, + * across spatial dimensions (i.e. last two dimensions). + */ +message MeanVarianceNormalizeLayerParams { + + /* + * If true, mean and variance are computed across channels. + */ + bool acrossChannels = 1; + + /* + * If false, only mean is subtracted. + */ + bool normalizeVariance = 2; + + /* + * A small constant to avoid division by 0 while normalizing variance. + * Defaults to ``1e-6`` if not set or set to ``0``. + */ + float epsilon = 3; + +} + +/* + * A layer that repeats a sequence or the dimension sitting at axis = -5 + * + * .. code:: + * + * y = SequenceRepeatLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A blob with rank at least 5. + * e.g: shape ``[Seq, B, C, H, W]`` + * Output + * A blob with the same rank as the input. 
+ * e.g.: for input shape ``[Seq, B, C, H, W]``, output shape is ``[nRepetitions * Seq, B, C, H, W]``. + */ +message SequenceRepeatLayerParams { + + /* + * Number of repetitions. + * Defaults to ``1`` if not set or set to ``0``. + */ + uint64 nRepetitions = 1; + +} + +// Recurrent Layers +// ---------------- + +/* + * The following activations are supported with recurrent layers: + * - Linear + * - Sigmoid + * - Tanh + * - ReLU + * - Scaled Hyperbolic Tangent: alpha * tanh(beta * x), currently only supported for alpha = 1.7159, beta = 2/3 + * - Hard Sigmoid: min(max(alpha * x + beta, 0), 1), currently only supported for alpha = 0.2, beta = 0.5 + */ + +/* + * A simple recurrent layer. + * + * .. code:: + * + * y_t = SimpleRecurrentLayer(x_t, y_{t-1}) + * + * Input + * A blob of rank 5, with shape `[Seq, Batch, inputVectorSize, 1, 1]``. + * This represents a sequence of vectors of size ``inputVectorSize``. + * Output + * Same rank as the input. + * Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps. + * + * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false`` + * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true`` + * + * This layer is described by the following equation: + * + * .. math:: + * \boldsymbol{y_t} = f(\mathrm{clip}(W \boldsymbol{x_t} + \ + * R \boldsymbol{y_{t-1}} + b)) + * + * - ``W`` is a 2-dimensional weight matrix + * (``[outputVectorSize, inputVectorSize]``, row-major) + * - ``R`` is a 2-dimensional recursion matrix + * (``[outputVectorSize, outputVectorSize]``, row-major) + * - ``b`` is a 1-dimensional bias vector (``[outputVectorSize]``) + * - ``f()`` is an activation + * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]`` + */ +message SimpleRecurrentLayerParams { + + uint64 inputVectorSize = 1; // The size of the input vectors. 
+ uint64 outputVectorSize = 2; // The size of the output vectors. + + /* + * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5) + */ + ActivationParams activation = 10; // The activation function. + + /* + If false output is just the result after final state update. + If true, output is a sequence, containing outputs at all time steps. + */ + bool sequenceOutput = 15; + + bool hasBiasVector = 20; // If false, no bias is added. + + WeightParams weightMatrix = 30; // Weight matrix W. + WeightParams recursionMatrix = 31; // Recursion Weight matrix R. + WeightParams biasVector = 32; // Bias vector b. + + bool reverseInput = 100; + // If true, then the node processes the input sequence from right to left + +} + +/* + * Gated-Recurrent Unit (GRU) Layer + * + * .. code:: + * + * y_t = GRULayer(x_t, y_{t-1}) + * + * Input + * A blob of rank 5, with shape `[Seq, Batch, inputVectorSize, 1, 1]``. + * This represents a sequence of vectors of size ``inputVectorSize``. + * Output + * Same rank as the input. + * Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps. + * + * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false`` + * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true`` + * + * This layer is described by the following equations: + * + * Update Gate + * .. math:: + * \boldsymbol{z_t} = \ + * f(\mathrm{clip}(W_z \boldsymbol{x_t} + \ + * R_z \boldsymbol{y_{t-1}} + b_z) + * + * Reset Gate + * .. math:: + * \boldsymbol{r_t} = \ + * f(\mathrm{clip}(W_r \boldsymbol{x_t} + \ + * R_r \boldsymbol{y_{t-1}} + b_r)) + * + * Cell Memory State + * .. math:: + * \boldsymbol{c_t} = \ + * \boldsymbol{y_{t-1}} \odot \boldsymbol{r_t} + * + * Output Gate + * .. 
math:: + * \boldsymbol{o_t} = \ + * g(\mathrm{clip}(W_o \boldsymbol{x_t} + \ + * R_o \boldsymbol{c_t} + b_o)) + * + * Output + * .. math:: + * \boldsymbol{y_t} = \ + * (1 - \boldsymbol{z_t}) \odot \boldsymbol{o_t} + \ + * \boldsymbol{z_t} \odot \boldsymbol{y_{t-1}} + * + * - ``W_z``, ``W_r``, ``W_o`` are 2-dimensional input weight matrices + * (``[outputVectorSize, inputVectorSize]``, row-major) + * - ``R_z``, ``R_r``, ``R_o`` are 2-dimensional recursion matrices + * (``[outputVectorSize, outputVectorSize]``, row-major) + * - ``b_z``, ``b_r``, ``b_o`` are 1-dimensional bias vectors + * (``[outputVectorSize]``) + * - ``f()``, ``g()`` are activations + * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]`` + * - ``⊙`` denotes the elementwise product of matrices + */ +message GRULayerParams { + + uint64 inputVectorSize = 1; // Size of the input vectors. + uint64 outputVectorSize = 2; // Size of the output vectors. + + /* + * 2 element array representing activations [f(), g()] in that order. + * Typical values used = [sigmoid, tanh]. + * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5) + */ + repeated ActivationParams activations = 10; + + /* + * If false output is just the result after final state update. + * If true, output is a sequence, containing outputs at all time steps. + */ + bool sequenceOutput = 15; + + /* + * If false, no biases (``b_z``, ``b_r``, ``b_o``) are added. + */ + bool hasBiasVectors = 20; + + WeightParams updateGateWeightMatrix = 30; // Weight Matrix W_z. + WeightParams resetGateWeightMatrix = 31; // Weight Matrix W_r. + WeightParams outputGateWeightMatrix = 32; // Weight Matrix W_o. + + WeightParams updateGateRecursionMatrix = 50; // Recursion Weight Matrix R_z. + WeightParams resetGateRecursionMatrix = 51; // Recursion Weight Matrix R_r. + WeightParams outputGateRecursionMatrix = 52; // Recursion Weight Matrix R_o. 
+ + WeightParams updateGateBiasVector = 70; // Bias vector b_z. + WeightParams resetGateBiasVector = 71; // Bias vector b_r. + WeightParams outputGateBiasVector = 72; // Bias vector b_o. + + // If true, then the node processes the input sequence from right to left + bool reverseInput = 100; + +} + +/* + * Long short-term memory (LSTM) parameters. + * + * This is described by the following equations: + * + * Input Gate + * .. math:: + * \boldsymbol{i_t} = \ + * f(\mathrm{clip}(W_i \boldsymbol{x_t} + \ + * R_i \boldsymbol{y_{t-1}} + \ + * p_i \odot c_{t-1} + b_i)) + * + * Forget Gate + * .. math:: + * \boldsymbol{f_t} = \ + * f(\mathrm{clip}(W_f \boldsymbol{x_t} + \ + * R_f \boldsymbol{y_{t-1}} + \ + * p_f \odot c_{t-1} + b_f)) + * + * Block Input + * .. math:: + * \boldsymbol{z_t} = \ + * g(\mathrm{clip}(W_z \boldsymbol{x_t} + \ + * R_z \boldsymbol{y_{t-1}} + b_z)) + * + * Cell Memory State + * .. math:: + * \boldsymbol{c_t} = \ + * \boldsymbol{c_{t-1}} \odot \boldsymbol{f_t} + \ + * \boldsymbol{i_t} \odot \boldsymbol{z_t} + * + * Output Gate + * .. math:: + * \boldsymbol{o_t} = \ + * f(\mathrm{clip}(W_o \boldsymbol{x_t} + \ + * R_o \boldsymbol{y_{t-1}} + \ + * p_o \odot c_t + b_o)) + * + * Output + * .. 
math:: + * \boldsymbol{y_t} = \ + * h(\boldsymbol{c_t}) \odot \boldsymbol{o_t} + * + * - ``W_i``, ``W_f``, ``W_z``, ``W_o`` are 2-dimensional input weight matrices + * (``[outputVectorSize, inputVectorSize]``, row-major) + * - ``R_i``, ``R_f``, ``R_z``, ``R_o`` are 2-dimensional recursion matrices + * (``[outputVectorSize, outputVectorSize]``, row-major) + * - ``b_i``, ``b_f``, ``b_z``, ``b_o`` are 1-dimensional bias vectors + * (``[outputVectorSize]``) + * - ``p_``, ``p_f``, ``p_o`` are 1-dimensional peephole vectors + * (``[outputVectorSize]``) + * - ``f()``, ``g()``, ``h()`` are activations + * - ``clip()`` is a function that constrains values between ``[-50.0, 50.0]`` + * - ``⊙`` denotes the elementwise product of matrices + */ +message LSTMParams { + + /* + * If true, output is a sequence, containing outputs at all time steps. + * If false, output is just the result after final state update. + */ + bool sequenceOutput = 10; + + /* + * If false, no biases (``b_i``, ``b_f``, ``b_z``, ``b_o``) are added. + */ + bool hasBiasVectors = 20; + + /* + * If true, a vector of ``1`` values is added to ``b_f``. + */ + bool forgetBias = 30; + + /* + * If true, peephole vectors are included. + */ + bool hasPeepholeVectors = 40; + + /* + * If the coupled Input and Forget flag is on, the behaviour of + * ``c_t`` is changed to the following (i.e. forget gate is not used): + * + * .. math:: + * \boldsymbol{c_t} = \ + * \boldsymbol{c_{t-1}} \odot (1 - \boldsymbol{i_t}) + \ + * \boldsymbol{i_t} \odot \boldsymbol{z_t} + * + */ + bool coupledInputAndForgetGate = 50; + + /* + * Places a limit on the maximum and minimum values of ``c_t``. + * c_t = min(c_t, cellClipThreshold) + * c_t = max(c_t, -cellClipThreshold) + * If 0, it is set to its default value = 50.0. + */ + float cellClipThreshold = 60; + +} + +/* + * Weights for long short-term memory (LSTM) layers + */ +message LSTMWeightParams { + + WeightParams inputGateWeightMatrix = 1; // Weight Matrix W_i. 
+ WeightParams forgetGateWeightMatrix = 2; // Weight Matrix W_f. + WeightParams blockInputWeightMatrix = 3; // Weight Matrix W_z. + WeightParams outputGateWeightMatrix = 4; // Weight Matrix W_o. + + WeightParams inputGateRecursionMatrix = 20; // Recursion Weight Matrix R_i. + WeightParams forgetGateRecursionMatrix = 21; // Recursion Weight Matrix R_f. + WeightParams blockInputRecursionMatrix = 22; // Recursion Weight Matrix R_z. + WeightParams outputGateRecursionMatrix = 23; // Recursion Weight Matrix R_o. + + //biases: + WeightParams inputGateBiasVector = 40; // Bias vector b_i. + WeightParams forgetGateBiasVector = 41; // Bias vector b_f. + WeightParams blockInputBiasVector = 42; // Bias vector b_z. + WeightParams outputGateBiasVector = 43; // Bias vector b_o. + + //peepholes: + WeightParams inputGatePeepholeVector = 60; // Peephole vector p_i. + WeightParams forgetGatePeepholeVector = 61; // Peephole vector p_f. + WeightParams outputGatePeepholeVector = 62; // Peephole vector p_o. + +} + +/* + * A unidirectional long short-term memory (LSTM) layer. + * + * .. code:: + * + * (y_t, c_t) = UniDirectionalLSTMLayer(x_t, y_{t-1}, c_{t-1}) + * + * Input + * A blob of rank 5, with shape `[Seq, Batch, inputVectorSize, 1, 1]``. + * This represents a sequence of vectors of size ``inputVectorSize``. + * Output + * Same rank as the input. + * Represents a vector of size ``outputVectorSize``. It is either the final output or a sequence of outputs at all time steps. + * + * - Output Shape: ``[1, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == false`` + * - Output Shape: ``[Seq, Batch, outputVectorSize, 1, 1]`` , if ``sequenceOutput == true`` + * + */ +message UniDirectionalLSTMLayerParams { + + uint64 inputVectorSize = 1; // Size of the input vectors. + uint64 outputVectorSize = 2; // Size of the output vectors. + + /* + * 3 element array representing activations [f(),g(),h()] in that order. + * Typical values used = [sigmoid, tanh, tanh]. 
+ * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5) + */ + repeated ActivationParams activations = 10; + + LSTMParams params = 15; + + LSTMWeightParams weightParams = 20; // Weights, biases and peepholes. + + // If true, then the node processes the input sequence from right to left + bool reverseInput = 100; + +} + +/* + * Bidirectional long short-term memory (LSTM) layer + * + * .. code:: + * + * (y_t, c_t, y_t_reverse, c_t_reverse) = BiDirectionalLSTMLayer(x_t, y_{t-1}, c_{t-1}, y_{t-1}_reverse, c_{t-1}_reverse) + * + * Input + * A blob of rank 5, with shape `[Seq, Batch, inputVectorSize, 1, 1]``. + * This represents a sequence of vectors of size ``inputVectorSize``. + * Output + * Same rank as the input. + * Represents a vector of size ``2 * outputVectorSize``. It is either the final output or a sequence of outputs at all time steps. + * + * - Output Shape: ``[1, Batch, 2 * outputVectorSize, 1, 1]`` , if ``sequenceOutput == false`` + * - Output Shape: ``[Seq, Batch, 2 * outputVectorSize, 1, 1]`` , if ``sequenceOutput == true`` + * + * + * The first LSTM operates on the input sequence in the forward direction. + * The second LSTM operates on the input sequence in the reverse direction. 
+ * + * Example: given the input sequence ``[x_1, x_2, x_3]``, + * where ``x_i`` are vectors at time index ``i``: + * + * The forward LSTM output is ``[yf_1, yf_2, yf_3]``, + * + * where ``yf_i`` are vectors of size ``outputVectorSize``: + * + * - ``yf_1`` is the output at the end of sequence {``x_1``} + * - ``yf_2`` is the output at the end of sequence {``x_1``, ``x_2``} + * - ``yf_3`` is the output at the end of sequence {``x_1``, ``x_2``, ``x_3``} + * + * The backward LSTM output: ``[yb_1, yb_2, yb_3]``, + * + * where ``yb_i`` are vectors of size ``outputVectorSize``: + * + * - ``yb_1`` is the output at the end of sequence {``x_3``} + * - ``yb_2`` is the output at the end of sequence {``x_3``, ``x_2``} + * - ``yb_3`` is the output at the end of sequence {``x_3``, ``x_2``, ``x_1``} + * + * Output of the bi-dir layer: + * + * - if ``sequenceOutput = True`` : { ``[yf_1, yb_3]``, ``[yf_2, yb_2]``, ``[yf_3, yb_1]`` } + * - if ``sequenceOutput = False`` : { ``[yf_3, yb_3]`` } + */ +message BiDirectionalLSTMLayerParams { + + /* + * Size of the input vectors. + */ + uint64 inputVectorSize = 1; + /* + * Size of the outputs vectors. + * It is same for both forward and backward LSTMs. + */ + uint64 outputVectorSize = 2; + + /* + * 3 element array representing activations [f(),g(),h()] in that order. + * Typical values used = [sigmoid, tanh, tanh]. + * Activations supported are Linear, Sigmoid, Tanh, ReLU, Scaled Tanh (alpha = 1.71, beta = 2/3), Hard sigmoid (alpha = 0.2, beta = 0.5) + */ + repeated ActivationParams activationsForwardLSTM = 10; + /* + * Currently, backward LSTM activations + * must be same as the ones for the forward LSTM. + */ + repeated ActivationParams activationsBackwardLSTM = 11; + + /* + * Common parameters shared by the forward and backward LSTMs. + */ + LSTMParams params = 15; + + /* + * Weights and biases. + * Must be a length 2 message, + * for the forward and backward LSTM respectively. 
+ */ + repeated LSTMWeightParams weightParams = 20; + +} + +message CustomLayerParams { + + message CustomLayerParamValue { + oneof value { + double doubleValue = 10; + string stringValue = 20; + int32 intValue = 30; + int64 longValue = 40; + bool boolValue = 50; + } + } + + string className = 10; // The name of the class (conforming to MLCustomLayer) corresponding to this layer + repeated WeightParams weights = 20; // Any weights -- these are serialized in binary format and memmapped at runtime + map parameters = 30; // these may be handled as strings, so this should not be large + string description = 40; // An (optional) description of the layer provided by the model creator. This information is displayed when viewing the model, but does not affect the model's execution on device. + +} + +/* + * A layer that rearranges the dimensions and data of an input. + * + * .. code:: + * + * y = TransposeLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * A N-Dimensional tensor. + * Output + * A N-Dimensional tensor of the same rank but with dimensions and data permuted according to axes. + * Shape: ``[InputShape[axis[0]], InputShape[axis[1]], ... , InputShape[axis[N-1]]]`` + * + * Examples: + * + * - If ``axes`` is set to ``[3, 1, 2, 0]`` and the input shape is ``[6,7,8,9]``, + * then the output has shape ``[9,7,8,6]`` + */ + +message TransposeLayerParams { + + /* + * Length of "axes" should match the rank of input & output tensor + * "axes" should be a permutation of "[0,1,2,...,N-1]" where N is the rank. + */ + repeated uint64 axes = 1; // + +} + +/* + * A layer that computes the matrix multiplication of two tensors with numpy-like broadcasting + * where the matrices reside in the last two indices of the tensor. + * + * .. code:: + * + * y = BatchedMatMul(a,b) + * + * Requires 1 or 2 inputs and produces 1 output. + * + * The first tensor, "a", must be provided as an input. 
The second tensor can either be an input or provided as a weight matrix parameter. + * + * Input + * - a: First N-Dimensional tensor + * - b: Second N-Dimensional tensor (either a rank-N input or a matrix, i.e. N=2, provided as a layer parameter) + * + * Output + * A tensor containing the matrix product of two tensors. + * When there are two inputs: rank is max(2, rank(a), rank(b)) + * When there is one input: rank is same as that of the input. + * + * This operation behaves as following: + * + * When there are two inputs: + * - If N >= 2 for both tensors, it is treated as a batch of matrices residing in the last two indices. + * All the indices, except for the last two, are broadcasted using conventional rules. + * - If the first tensor is 1-D, it is converted to a 2-D tensor by prepending a 1 to its shape. Eg. (D) -> (1,D) + * - If the second tensor is 1-D, it is converted to a 2-D tensor by appending a 1 to its shape. Eg. (D) -> (D,1) + * + * When there is one input: + * - The weight matrix corresponds to a matrix, of shape (X1, X2). Values of X1, X2 must be provided as layer parameters. + * - The input, "a", is reshaped into a matrix by combining all the leading dimensions, except the last, into a batch dimension. eg: + * - if "a" is rank 1 (X1,) --> (1, X1). Output shape will be (X2,) + * - if "a" is rank 2 (B1, X1) --> no need to reshape. Output shape will be (B1, X2) + * - if "a" is rank 3 (B1, B2, X1) --> (B1 * B2, X1). Output shape will be (B1, B2, X2) + * - etc + */ +message BatchedMatMulLayerParams { + + /* + * If transposeA is true, it transposes the left matrix on the fly before matrix multiplication. + * (is ignored when there is one input) + */ + bool transposeA = 1; + /* + * If transposeB is true, it transposes the right matrix on the fly before matrix multiplication. + * (is ignored when there is one input) + */ + bool transposeB = 2; + + /* + * Following parameters are ignored when there are two inputs. 
+ */ + + uint64 weightMatrixFirstDimension = 5; // X1: same as the last dimension of the input tensor + uint64 weightMatrixSecondDimension = 6; // X2: same as the last dimension of the output tensor + + bool hasBias = 7; // Whether a bias is added or not. Supported only when there is one input. + + /* + * Weight matrix representing shape [X1, X2]. + * Values are however stored in column major order, + * in the "repeated float" or "bytes" fields of the message "WeightParams" + */ + WeightParams weights = 8; + WeightParams bias = 9; // Bias vector [X2]. Supported only when there is one input. + + /* + * If set, this layer, at runtime, quantizes the floating point input blob to int8 before applying the + * matrix multiplication using the INT8 weight parameters provided in weights->int8RawValue. The + * result is then dequantized. + * Requires: + * * number of inputs to be 1 + * * hasBias == false + * * QuantizationType == LinearQuantizationParams, such that + * * size of the "scale" field is 1 and "bias" field is empty in "LinearQuantizationParams" + * * numberOfBits == 8 + * * weights->rawValue_size to be empty + */ + bool int8DynamicQuantize = 10; + +} + +/* + * A layer that concatenates a list of tensors along a specified axis. + * + * .. code:: + * + * y = ConcatNDLayer(x1,x2,....) + * + * Requires at least 2 input and produces 1 output. + * + * Input + * The rank of the input tensors must match and all dimensions also must match, except for the dimension 'axis'. + * + * + * Output + * Same rank as the input. The dimension along "axis", is the sum of the dimensions of the inputs. 
+ * + * example: + * + * in1 : shape (3, 2), value = [[1, 2], [3, 4], [5, 6]] + * in2 : shape (3, 2), value = [[7, 8], [9, 10], [11, 12]] + * axis = 0 + * + * if interleave = False (default) + * output : shape (6, 2) + * output[0:3, :] = in1 + * output[3:6, :] = in2 + * value = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]] + * + * if interleave = True + * output : shape (6, 2) + * output[0::2, :] = in1 + * output[1::2, :] = in2 + * value = [[1, 2], [7, 8], [3, 4], [9, 10], [5, 6], [11, 12]] + * + */ +message ConcatNDLayerParams { + + /* + * Dimension along which to concatenate. Supports negative values of the parameter 'axis'. + */ + int64 axis = 1; + + /* + * (Only available in Core ML Specification >= 5 (iOS >= 14, macOS >= 11.0) + * Interleave option. If True, concatenation is done via interleaving the inputs. + * This requires all inputs to have the exact same shape. + */ + bool interleave = 2; + + +} + +/* + * A layer that performs softmax normalization along a specified axis. + * + * .. code:: + * + * y = SoftmaxNDLayer(x) + * + * Requires 1 input and produces 1 output. + * + * Output shape is same as the input. + */ +message SoftmaxNDLayerParams { + + /* + * Dimension on which the softmax would be performed. Supports negative values of the parameter 'axis'. + */ + int64 axis = 1; + +} + +/* + * A layer that reverses specific dimensions of the input tensor. + * It is similar in functionality to the numpy.flip method. + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + */ +message ReverseLayerParams { + + /* + * Reverses each dimension of the input tensor for which corresponding reverseDim is set to True. + * Requires len(reverseDim) == rank(inputTensor) + */ + repeated bool reverseDim = 1; + +} + +/* + * A layer that reverses variable length slices. + * + * Requires 2 inputs and produces 1 output. + * + * 2 inputs, in order are denoted by "data", "seq_lengths". + * "seq_lenghts" must be a rank 1 tensor, i.e. 
seq_lengths.shape = (B,) + * which contains the lengths of the amount of sequence to be reversed, for each element of the batch. + * Dimension "batchAxis" in "data" must be equal to B, i.e, + * data.shape[batchAxis] = B. + * + * According to the batch axis, input "data" is first divided into a batch of B inputs, + * each of which is flipped along the dimension "sequenceAxis", by the amount specified in + * "seq_lengths", the second input. + * + * e.g.: + * + * data [shape = (2,4)]: + * [0 1 2 3] + * [4 5 6 7] + * seq_lengths [shape = (2,)]: + * [3, 0] + * batchAxis = 0 + * sequenceAxis = 1 + * + * output [shape = (2,4)]: + * [2 1 0 3] + * [4 5 6 7] + * + * + * data [shape = (2,3,2)]: + * [0 1] + * [2 3] + * [4 5] (slice = 0) + * [6 7] + * [8 9] + * [10 11] (slice = 1) + * seq_lengths [shape = (2,)]: + * [2, 3] + * batchAxis = 0 + * sequenceAxis = 1 + * + * output [shape = (2,3,2)]: + * [2 3] + * [0 1] + * [4 5] (slice = 0) + * [10 11] + * [8 9] + * [6 7] (slice = 1) + * + * Output shape is same as the input. + */ +message ReverseSeqLayerParams { + + int64 batchAxis = 1; // batch axis has to be strictly less than seq_axis + int64 sequenceAxis = 2; + +} + +/* + * A layer that loads data as a parameter and provides it as an output. + * + * .. code:: + * + * y = LoadConstantNDLayer() + * + * Requires no input and produces 1 output. + * + * Output: A tensor with shape as provided in the parameter "shape" + */ +message LoadConstantNDLayerParams { + + /* + * The shape of the constant to be loaded. + */ + repeated uint64 shape = 1; + WeightParams data = 2; + +} + +/* + * A layer that generates an output tensor with a constant value. + * Input is only used to determine the shape of the output. + * This layer is used to allocate a tensor with a dynamic shape (that of the input) and constant value. + * + * Requires 1 input and produces 1 output. + * + * .. code:: + * + * y = FillLikeLayer(x) + * + * Input + * A N-Dimensional tensor, whose values are ignored. 
Only the shape is used to + * infer the shape of the output. + * + * Output + * A N-Dimensional tensor with the same shape as the input tensor. + * + */ +message FillLikeLayerParams { + + float value = 1; + +} + +/* + * A layer that generates an output tensor with a constant value. + * This layer is used to allocate a tensor with a static shape and constant value. + * + * Requires no input and produces 1 output. + * + * .. code:: + * + * y = FillStaticLayer(x) + * + * Output + * A N-Dimensional tensor of shape "targetShape". + * + */ +message FillStaticLayerParams { + + float value = 1; + repeated uint64 targetShape = 2; + +} + +/* + * A layer that generates an output tensor with a constant value. + * This layer is used to allocate a tensor with a dynamic shape (as specified by the input) and constant value. + * + * Requires 1 input and produces 1 output. + * + * .. code:: + * + * y = FillDynamicLayer(x) + * + * Input + * A rank 1 tensor specifying the shape of the output + * + * Output + * An N-Dimensional tensor with the shape specified by the values in the input tensor. + * + */ +message FillDynamicLayerParams { + + float value = 1; + +} + +/* + * A layer that returns the elements either from tensor x or tensor y, + * depending on the value in the condition tensor. + * It is similar in functionality to the numpy.where method with 3 inputs. + * + * Requires 3 inputs and produces 1 output. + * Inputs, in order, are the condition tensor, x and y. + * + * for each vector index (i,...,j): + * output[i,...,j] = x[i,...,j] if condition[i,...,j] = True + * y[i,...,j] if condition[i,...,j] = False + * + * All the 3 inputs are first broadcasted to a common shape. + * (the shapes must be broadcastable) + * + * output.rank = max(input[0].rank, input[1].rank, input[2].rank) + * + */ +message WhereBroadcastableLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric sine function. + * + * + * .. 
code:: + * + * y = SinLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message SinLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric cosine function. + * + * + * .. code:: + * + * y = CosLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message CosLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric tangent function. + * + * + * .. code:: + * + * y = TanLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message TanLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric arcsine function. + * + * + * .. code:: + * + * y = AsinLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AsinLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric arccosine function. + * + * + * .. code:: + * + * y = AcosLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AcosLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric arctangent function. + * + * + * .. code:: + * + * y = AtanLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AtanLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic sine function. + * + * + * .. code:: + * + * y = SinhLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message SinhLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic cosine function. + * + * + * .. code:: + * + * y = CoshLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. 
+ * + */ +message CoshLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic tangent function. + * + * + * .. code:: + * + * y = TanhLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message TanhLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic arcsine function. + * + * + * .. code:: + * + * y = AsinhLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AsinhLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic arccosine function. + * + * + * .. code:: + * + * y = AcoshLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AcoshLayerParams { + +} + +/* + * A layer that computes elementwise trigonometric hyperbolic arctangent function. + * + * + * .. code:: + * + * y = AtanhLayer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message AtanhLayerParams { + +} +/* + * A layer that raises each element in first tensor to the power of + * corresponding element in the second tensor. + * Supports conventional numpy-like broadcasting. + * + * .. code:: + * + * y = PowBroadcastableLayer(x) + * + * Requires 2 inputs and produces 1 output. + * + * Input + * - First N-Dimensional tensor + * - Second N-Dimensional tensor + * + * Output + * An N-Dimensional tensor with the broadcast shape. + * + */ +message PowBroadcastableLayerParams { + +} + +/* + * A layer that computes the exponential of all elements in the input tensor, with the base 2. + * + * + * .. code:: + * + * y = Exp2Layer(x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message Exp2LayerParams { + +} + +/* + * A layer that returns a tensor containing the indices of all non-zero + * elements of input tensor. 
+ * It is similar in functionality to the numpy.where method with 1 input. + * + * Requires 1 input and produces 1 output. + * Output is of rank 2, of shape (N,R), + * where N is the number of non-zero elements in the input and R is the rank of the input. + * + * Output contains indices represented in the multi-index form + * + * e.g.: + * input {shape = (4,)}: + * [0 1 0 2] + * output {shape = (2,1)}: + * [1] + * [3] + * + * + * input {shape = (3, 3)}: + * [1 2 1] + * [0 2 2] + * [2 1 0] + * output {shape = (7,1)}: + * [0. 0.] + * [0. 1.] + * [0. 2.] + * [1. 1.] + * [1. 2.] + * [2. 0.] + * [2. 1.] + * + */ +message WhereNonZeroLayerParams { + +} + +/* + * A layer that copies a tensor setting everything outside a central band in + * each inner-most matrix to zero. + * + * Requires 1 input and produces 1 output. + * + * Parameters for matrix_band_part layer + * band(m, n) = (num_lower < 0 || (m-n) <= num_lower) && (num_upper < 0 || (n-m) <= num_upper). + * output[i, j, k, ..., m, n] = band(m, n) * input[i, j, k, ..., m, n] + * + * + * Output shape is same as the input shape. + * Rank of the input must be at least 2. + * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch. + */ +message MatrixBandPartLayerParams { + + int64 numLower = 1; + int64 numUpper = 2; + +} + +/* + * A layer that copies a tensor setting everything outside upper triangular to zero. + * + * Requires 1 input and produces 1 output. + * + * Output shape is same as the input shape. + * Rank of the input must be at least 2. + * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch. + */ +message UpperTriangularLayerParams { + + int64 k = 1; // Diagonal below which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above + +} + +/* + * A layer that copies a tensor setting everything outside lower triangular to zero. 
+ * + * Requires 1 input and produces 1 output. + * + * Output shape is same as the input shape. + * Rank of the input must be at least 2. + * For rank higher than 2, the last 2 dimensions are treated as the matrix, while the rest are treated as batch. + */ +message LowerTriangularLayerParams { + + int64 k = 1; // Diagonal above which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above + +} + +/* + * + * A layer that broadcasts a tensor to a new shape. + * + * Requires 2 inputs and produces 1 output. + * + * First input is broadcast to produce the output, while the second input is only + * used to determine the shape of the output. Values of second input are not used. + * + * Output is a tensor with the same shape as the second input. + * + */ +message BroadcastToLikeLayerParams { + +} + +/* + * + * A layer that broadcasts a tensor to a new shape. + * + * Requires 1 input and produces 1 output. + * + * Output tensor is the broadcasted version of the input and has shape as specified in the + * parameter "targetShape". + */ +message BroadcastToStaticLayerParams { + + repeated uint64 targetShape = 1; + +} + +/* + * + * A layer that broadcasts a tensor to a new shape. + * + * Requires 2 inputs and produces 1 output. + * + * First input is the one that is broadcasted to produce the output. + * Second input is a rank 1 tensor specifying the shape of the output. + * Output tensor has shape as specified by the values in the 2nd input tensor. + */ +message BroadcastToDynamicLayerParams { + +} + +/* + * A layer that performs element-wise addition operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message AddBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise maximum operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. 
+ */ +message MaxBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise minimum operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message MinBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise modular operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message ModBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise floor division operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message FloorDivBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise subtract operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message SubtractBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise multiply operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message MultiplyBroadcastableLayerParams { + +} + +/* + * A layer that performs element-wise division operation with broadcast support. + * + * Requires 2 inputs and produces 1 output. + */ +message DivideBroadcastableLayerParams { + +} + +/* + * Gather layer that gathers elements from the first input, along a specified axis, + * at indices specified in the second input. + * It is similar in functionality to the numpy.take method. + * + * Requires 2 inputs and produces 1 output. + * + * Given two inputs, 'data' and 'indices', gather the slices of 'data' + * and store into output. + * e.g. + * for i in [0, length(indices) - 1] + * output[i] = data[indices[i]] (1-D case, axis=0) + * + * if axis = 0: + * for each vector index (i,...,j) + * output[i,...,j,:,..,:] = data[indices[i,...,j],:,..,:] + * + * output.rank = (data.rank - 1) + indices.rank + * + * Negative indices and negative axis are supported. 
+ * + * e.g: + * + * data shape = (2, 3) + * indices shape = (6, 8) + * axis = 0 + * output shape = (6, 8) + (3,) = (6, 8, 3) + * + * data shape = (2, 3, 5) + * indices shape = (6, 8) + * axis = 1 + * output shape = (2,) + (6, 8) + (5,) = (2, 6, 8, 5) + * + */ +message GatherLayerParams { + + int64 axis = 1; + +} + +/* + * Scatter accumulation mode. + */ +enum ScatterMode { + + SCATTER_UPDATE = 0; + SCATTER_ADD = 1; // add + SCATTER_SUB = 2; // subtract + SCATTER_MUL = 3; // multiply + SCATTER_DIV = 4; // divide + SCATTER_MAX = 5; // maximum + SCATTER_MIN = 6; // minimum + +} + +/* + * A layer that scatters data into a new tensor according to indices from the input. + * This is the inverse operation of Gather. + * + * Requires 3 inputs and produces 1 output. + * + * Output is initialized with the first input. + * Then updated with the values in the third input, at indices specified by the second input. + * + * An example when axis=0: + * Given three inputs, in order, "container", "indices", "updates", where + * + * - "container" is a rank R+1 tensor of shape [D_0, D_1, ..., D_R], which + * contains D_0 number of tensors, each with shape [D_1, ..., D_R]. + * + * - "indices" is a rank 1 tensor with shape [N], where N is the number of updates. + * The values in this tensor must be in the range [0, D_0 - 1]. (negative indexing is supported) + * + * - "updates" is a rank R+1 tensor with shape [N, D_1, ..., D_R], which represents + * a total number of N tensors, each of shape [D_1, ..., D_R]. 
+ * + * The effect of this operation is as follows: + * + * output = container; + * For each i in 0, ..., N - 1 + * output[indices[i], :, ..., :] = updates[i, :, ..., :] // if mode == "SCATTER_UPDATE" + * + * or + * For each i in 0, ..., N - 1 + * output[indices[i], :, ..., :] += updates[i, :, ..., :] // if mode == "SCATTER_ADD" + * + * etc + * + * When "indices" is a tensor of rank greater than 1, the equation becomes (for axis=0): + * For each vector index (i,...,j) + * output[indices[i,...,j],...] -= updates[i,...,j,...] // if mode == "SCATTER_SUB" + * + * + * The output has the same shape as the first input. + * "indices" input must have rank less than or equal to the "updates" input and its shape + * must be a subset of the shape of the "updates" input. + * + * e.g: + * + * container shape = (4, 3) + * indices shape = (5, 2, 3) + * updates shape = (4, 5, 2, 3) + * axis = 1 + * output shape = (4, 3) + * + * container shape = (4, 4, 3) + * indices shape = (6,) + * updates shape = (4, 6, 3) + * axis = -2 + * output shape = (4, 4, 3) + * + * container shape = (5,) + * indices shape = (5, 7, 5, 6) + * updates shape = (5, 7, 5, 6) + * axis = -1 + * output shape = (5,) + */ + +message ScatterLayerParams { + + int64 axis = 1; + ScatterMode mode = 2; // mode of accumulation. + +} + +/* + * A layer that gathers elements from the first input, 'params', at the multi-indices specified + * by the second input, 'indices'. + * + * Requires 2 inputs and produces 1 output. + * + * 'params' = input[0], 'indices' = input[1] + * + * 'indices' is a rank K+1 tensor of shape [I_0, I_1, .., I_(K-1), I_K] which is viewed as a collection of + * indices of (I_0 * I_1 * ... * I_(K-1)) points in the I_K dimensional space. For instance, the multi-index of the first point + * is indices[0,0,...,0,:]. + * + * Here is how the output is constructed: + * + * for i = 0,1,...,(I_0-1) + * ...
+ * for j = 0,1,....,(I_(K-1)-1) + * output[i,....,j,:,:,..,:] = params[indices[i,...,j,:], :,:,..,:] + * + * Hence, output shape is [I_0, I_1,...,I(K-1)] + params.shape[I_K:] + * + * output.rank = indices.rank - 1 + params.rank - indices.shape[-1] + * + * e.g: + * + * input[0] shape = (4, 2, 3, 4) + * input[1] shape = (6, 2) + * output shape = (6,) + (3, 4) = (6, 3, 4) + * + * input[0] shape = (3, 3, 3, 4, 7) + * input[1] shape = (3, 5) + * output shape = (3,) + () = (3,) + * + * input[0] shape = (5, 3, 2, 5) + * input[1] shape = (2, 7, 3, 2) + * output shape = (2, 7, 3) + (2, 5) = (2, 7, 3, 2, 5) + * + */ +message GatherNDLayerParams { + +} + +/* + * A layer that scatters data into a new tensor according to multi-indices from the input. + * This is the inverse operation of GatherND. + * + * Requires 3 inputs and produces 1 output. + * 3 inputs, in order are denoted as "container", "indices", "updates". + * + * 'indices' is a rank K+1 tensor of shape [I_0, I_1, .., I_(K-1), I_K] which is viewed as a collection of + * indices of (I_0 * I_1 * ... * I_(K-1)) points in the I_K dimensional space. For instance, the multi-index of the first point + * is indices[0,0,...,0,:]. + * + * container.rank >= I_K + * updates.rank = K + (container.rank - I_K) + * shape of 'updates' = [I_0, I_1,...,I(K-1)] + container.shape[I_K:] + * + * output = container + * For each vector index (i,...,j) s.t. 0<=i shape: (3,) + * reps = N/A [Ignored] + * output shape = (2, 8, 12) + * + */ +message TileLayerParams { + + repeated uint64 reps = 1; + +} + +/* + * A layer that returns the shape of an input tensor. + * + * Requires 1 input and produces 1 output. + * + * Input: a tensor. + * Output: a vector of length R, where R is the rank of the input tensor + * Output is always a rank 1 tensor. + */ +message GetShapeLayerParams { + +} + +/* + * A layer that computes the Gauss error function, + * which is defined as: + * + * .. 
math:: + * f(x) = \dfrac{1}{\sqrt{\pi}}\int_{-x}^{x}{e^{-t^2}dt} + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + */ +message ErfLayerParams { + +} + +/* + * A layer that evaluates the Gaussian Error Linear Unit (GELU) activation. + * Following equations are used to compute the activation based on the value of the "mode" parameter: + * + * mode == 'EXACT': + * .. math:: + * f(x) = 0.5x\left ( 1+\rm{erf}\left ( \frac{x}{\sqrt{2}} \right ) \right ) + * + * mode == 'TANH_APPROXIMATION': + * .. math:: + * f(x) = 0.5x\left ( 1+\rm{tanh}\left ( \sqrt{2/\pi}\left ( x + 0.044715x^3 \right ) \right ) \right ) + * + * mode == 'SIGMOID_APPROXIMATION': + * .. math:: + * f(x) = x*\rm{sigmoid}(1.702x) + * + * Requires 1 input and produces 1 output. + * Output shape is same as the input. + * + */ +message GeluLayerParams { + + enum GeluMode { + + EXACT = 0; + TANH_APPROXIMATION = 1; + SIGMOID_APPROXIMATION = 2; + + } + + GeluMode mode = 1; // mode of GELU operation. + +} + +/* + * RangeStatic layer that returns a tensor that contains evenly spaced values. + * It is similar in functionality to the numpy.arange method. + * + * Requires no input and produces 1 output. + * Output is a rank 1 tensor. + */ +message RangeStaticLayerParams { + + float endValue = 1; + float startValue = 2; + float stepSizeValue = 3; + +} + +/* + * A layer that returns a tensor that contains evenly spaced values. + * Its functionality is similar to the numpy.arange method. + * + * Requires at least 1 input, up to a maximum of 3 inputs. + * Produces 1 output, which is a rank 1 tensor. + * + * Each input must be a scalar, or rank 1 and shape (1,). + * + * The first input represents the "endValue". + * The second input, if present, corresponds to "startValue". In this case the value of the "startValue" parameter is ignored. + * The third input, if present, corresponds to "stepSizeValue". In this case the value of the "stepSizeValue" parameter is ignored. 
+ * + */ +message RangeDynamicLayerParams { + + float startValue = 2; + float stepSizeValue = 3; + +} + +/* + * A layer that returns a tensor containing all windows of size ``windowSize`` + * separated by ``step`` along the dimension ``axis``. + * + * .. code:: + * + * y = SlidingWindows(x) + * + * Requires 1 input and produces 1 output. + * + * Input + * An N-Dimensional tensor. + * + * Output + * An (N+1)-Dimensional tensor. + * + * This operation behaves as following: + * - if axis = 0 & input is rank 1 (L,). Output shape will be (M, W). + * - if axis = 1 & input is rank 3 (B1, L, C1). Output shape will be (B1, M, W, C1) + * - if axis = 2 & input is rank 5 (B1, B2, L, C1, C2) --> (B1 * B2, L, C1 * C2) --> (B1 * B2, M, W, C1 * C2). Output shape will be (B1, B2, M, W, C1, C2) + * - etc. + * where + * - L, C, B refer to input length, feature dimension length & batch size respectively + * - W is the window size. + * - M is the number of windows/slices calculated as M = (L - W) / step + 1 + */ +message SlidingWindowsLayerParams { + + int64 axis = 1; + uint64 windowSize = 2; + uint64 step = 3; + +} + +/* + * A layer that applies layer normalization over the input tensor. + * + * Requires 1 input and produces 1 output. + * + * output = gamma * (input - computed_mean) / (sqrt(computed_variance + eps)) + beta + * + * Parameters + * normalizedShape: subset of the input shape, along with layer norm is performed, rest of the input shape is treated as the batch dimension. The mean and variance are computed for the input, over the last few dimensions as specified by the normalizedShape parameter. + * gamma: must have shape = "normalizedShape" + * beta: must have shape = "normalizedShape" + * eps: small constant to avoid division by 0 + * + * Output shape is same as the input. 
+ * + * e.g.: + * input shape = (10,5) + * normalized shape = (5,) or (10,5) + * + * input shape = (10,5,6,7) + * normalized shape = (7,) or (6,7) or (5,6,7) or (10,5,6,7) + */ +message LayerNormalizationLayerParams { + + repeated int64 normalizedShape = 1; + float eps = 2; + WeightParams gamma = 3; + WeightParams beta = 4; + +} + +/* + * Non maximum suppression (NMS) layer. + * Applies the non maximum suppression algorithm to input bounding box coordinates. + * The effect of this layer is similar to the functionality of the "NonMaximumSuppression" + * model type (for details please see NonMaximumSuppression.proto) with a couple of differences. + * One, this is a layer in a neural network model, whereas that is a different model type. Second, + * this layer supports a batch of bounding boxes. + * + * The NMS layer requires at least 2 inputs, and up to a maximum of 5 inputs. It produces 4 outputs. + * Following is the description of inputs and outputs: + * + * input 1, shape (B,N,4): coordinates of N boxes, for a batch size B. + * input 2, shape (B,N,C): class scores for each box. C can be 1 when there is only 1 score per box, i.e., no class specific score. + * + * input 3, optional, shape (1,): IoU threshold. When present, it overwrites the value provided in layer parameter "iouThreshold". + * input 4, optional, shape (1,): Score threshold. When present, it overwrites the value provided in layer parameter "scoreThreshold". + * input 5, optional, shape (1,): Maximum number of boxes. When present, it overwrites the value provided in layer parameter "maxBoxes". + * + * output 1, shape (B,maxBoxes,4): box coordinates, corresponding to the surviving boxes. + * output 2, shape (B,maxBoxes,C): box scores, corresponding to the surviving boxes. + * output 3, shape (B,maxBoxes): indices of the surviving boxes. Hence it will have values in the range [0,N-1], except for padding. + * output 4, shape (B,): number of boxes selected after the NMS algorithm, for each batch. 
+ * + * When surviving boxes are less than "maxBoxes", the first 3 outputs are padded. + * For the first two outputs, the padding is done using values 0, whereas for the third output the + * padding value used is -1, since the output values represent indices. + * + * If no box survives, that is, all the scores are below the "scoreThreshold", + * then for that batch, number of boxes (value of the fourth output) will be 1. The first 3 outputs will + * correspond to the box with the highest score. This is to avoid generating an "empty" output. + * + * The four values that describe the box dimensions are (in order): + * + * - x (center location of the box along the horizontal axis) + * - y (center location of the box along the vertical axis) + * - width (size of box along the horizontal axis) + * - height (size of box along the vertical axis) + * + * In each batch, + * the N scores for N boxes, used for suppression, are generated by taking the max of the matrix (N,C) + * along the columns. + * If "perClassSuppression" flag is false, suppression happens across all classes. + * If "perClassSuppression" flag is true, each box is assigned to the class with the highest + * score and then the suppression happens separately for boxes within the same class. + * + * Note that the 4th output can be used to dynamically slice the first 3 outputs, in case + * the padded outputs are not required. + * + */ +message NonMaximumSuppressionLayerParams { + /* + * The intersection over union (IoU) threshold over which boxes are suppressed. + */ + float iouThreshold = 1; + + /* + * Before IoU suppression is performed, boxes with class scores below this threshold are rejected. + */ + float scoreThreshold = 2; + + /* + * The maximum number of boxes to be given out as output. + * If the number of surviving boxes are less, output is padded up to this number. + */ + uint64 maxBoxes = 3; + + /* + * If true, suppression is performed independently within boxes of each class.
+ */ + bool perClassSuppression = 4; +} + +/* + * A layer that performs element-wise clamped ReLU operation. + * + * Requires 1 input and produces 1 output. + * + * This function has the following formula: + * + * .. math:: + * f(x) = \begin{cases} + * \text{min}(\text{beta},x) \;\; \text{if} \;\; x \geq 0\\ + * \text{min}(\text{beta} ,\text{alpha}\cdot x) \;\; \text{if} \;\; x<0 + * \end{cases} + * + * Output shape is same as the input. + * + * Available (iOS >= 14, macOS >= 11.0, watchOS >= 7) + */ +message ClampedReLULayerParams { + + float alpha = 1; + float beta = 2; + +} + +/* +* A layer that returns the indices that would sort the input tensor, along a specified axis. +* +* Requires 1 input and produces 1 output. +* +* Output has the same rank and shape as the input. +* +* Value of "axis" must be positive and less than the rank of the input. +* +* e.g.: +* +* input shape = (5,) +* axis = 0 +* input values = [3.1, 5.4, 32.9, 3.2, 77.0] +* output shape = (5,) +* output values = [0, 3, 1, 2, 4], descending = False +* output values = [4, 2, 1, 3, 0], descending = True +* +* input shape = (2,3) +* axis = 1 +* input values = [[3, 5, 32], [3, 77, 6]] +* output shape = (2,3) +* output values = [[0, 1, 2], [0, 2, 1]], descending = False +* output values = [[2, 1, 0], [1, 2, 0]], descending = True +* +*/ +message ArgSortLayerParams { + + int64 axis = 1; // must be between [0, input_rank - 1] + bool descending = 2; + +} + +/* + * A layer that does slice operation by providing size to be extracted + * from the given input tensor. + * + * Requires 2 inputs and produces 1 output. + * Rank of the output is same as the rank of the first input. + * + * The 1st input represents the tensor to be sliced. + * The 2nd input represents the beginning index to be sliced from. 
+ * + * Example: + * Input 1: x (x.shape = (2, 3, 4)) + * Input 2: begin + * size: 2 + * axis: 1 + * + * Output: x[:, begin:begin+2, :] + * + */ +message SliceBySizeLayerParams { + + int64 size = 2; + int64 axis = 3; + +} + + +// Neural Network Specializations +// ------------------------------ + +/* + * A neural network specialized as a classifier. + */ +message NeuralNetworkClassifier { + + repeated NeuralNetworkLayer layers = 1; + repeated NeuralNetworkPreprocessing preprocessing = 2; + + // use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs + NeuralNetworkMultiArrayShapeMapping arrayInputShapeMapping = 5; + + // use this enum value to determine the input tensor shapes to the neural network, for image inputs + NeuralNetworkImageShapeMapping imageInputShapeMapping = 6; + + NetworkUpdateParameters updateParams = 10; + + // The set of labels for every possible class. + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } + + // The name of the output blob containing the probability of each class. + // In other words, the score vector. Must be a 1-D tensor with the same + // number and order of elements as ClassLabels. + string labelProbabilityLayerName = 200; +} + + +/* + * A layer that computes the one hot representation of the input. + * + * Requires 1 or 2 inputs and produces 1 output. + * Rank of the output is one more than the first input. + * If the second input is present, it is used to determine the value of "oneHotVectorSize" and the parameter "oneHotVectorSize" is ignored. + * + * Input values correspond to indices and should typically be in the range [0,"oneHotVectorSize" -1]. If it is outside this range, a vector of all "offValue" will be chosen. + * + * Typically one hot vectors contain 0s everywhere, except 1 at the index that the input corresponds to. + * However, instead of 0, any float value could be generated by using the "offValue" parameter. 
+ * Similarly, instead of 1, any other value can be used by employing the "onValue" parameter. + * + * e.g.: + * input shape: (10,), "oneHotVectorSize" : 32, axis=-1, then output shape will be (10,32) + * input shape: (10,23), "oneHotVectorSize" : 32, axis=1, then output shape will be (10,32,23) + * input shape: (10,), "oneHotVectorSize" : 32, axis=0, then output shape will be (32,10) + * + * input shape: (2,), "oneHotVectorSize" : 4, axis=-1, then output shape will be (2,4) + * say input values = [2, 0], and "onValue" = 5, and "offValue" = -1, then output will be: + * [-1, -1, 5, -1 + * 5, -1, -1, -1] + * + * say input values = [2, -1], and "onValue" = 5, and "offValue" = -1, then output will be: + * [-1, -1, 5, -1 + * -1, -1, -1, -1] + * + * Available (iOS >= 14, macOS >= 11.0, watchOS >= 7) + */ + +message OneHotLayerParams { + + uint64 oneHotVectorSize = 1; // size of the one hot vector + int64 axis = 2; // negative indexing is supported. It refers to the axis in the output tensor. + float onValue = 3; + float offValue = 4; +} + + +/* + * A layer that computes the cumsum values of the input along a given axis. + * + * Requires 1 or 2 inputs and produces 1 output. + * + * Output shape and rank is same as the first input. + * If the second input is present, it is used to determine the value of "axis" and the parameter "axis" is ignored. 
+ * + * e.g.: + * Input shape = (3,), values it has: [4, 6, 7] + * + * Then output values will be: + * + * if "excludeFinalSum" = False and "reverse" = False: + * output values : [4, 10, 17] + * + * if "excludeFinalSum" = True and "reverse" = False: + * output values : [0, 4, 10] + * + * if "excludeFinalSum" = False and "reverse" = True: + * output values : [17, 13, 7] + * + * if "excludeFinalSum" = True and "reverse" = True: + * output values : [13, 7, 0] + * + * + * Available (iOS >= 14, macOS >= 11.0, watchOS >= 7) + */ + + +message CumSumLayerParams { + + int64 axis = 1; // negative indexing is supported + + // if true, the first element of the output is 0, and the last element contains the sum of the input up to the penultimate value + // if false, the first element of the output is same as the input and the last element is the sum of all the input values + // (this behavior is reversed when "reverse" flag is True) + bool excludeFinalSum = 2; + + bool reverse = 3; // if true, cumsum is performed in the opposite direction +} + + +/* + * A neural network specialized as a regressor. 
+ */ +message NeuralNetworkRegressor { + + repeated NeuralNetworkLayer layers = 1; + repeated NeuralNetworkPreprocessing preprocessing = 2; + + // use this enum value to determine the input tensor shapes to the neural network, for multiarray inputs + NeuralNetworkMultiArrayShapeMapping arrayInputShapeMapping = 5; + + // use this enum value to determine the input tensor shapes to the neural network, for image inputs + NeuralNetworkImageShapeMapping imageInputShapeMapping = 6; + + NetworkUpdateParameters updateParams = 10; + +} + +// --------------------------------------------------------- +// On-device Training related messages +// --------------------------------------------------------- + +/* + * Details on how the network will be updated + */ +message NetworkUpdateParameters { + + repeated LossLayer lossLayers = 1; + Optimizer optimizer = 2; + Int64Parameter epochs = 3; + + /* + * Describes whether to shuffle the batch of data between epochs. + */ + BoolParameter shuffle = 10; + + /* + * The seed to be used in an associated random number generator. + */ + Int64Parameter seed = 20; +} + +/* + * Loss layer - categorical cross entropy and mean squared error are the only supported loss functions currently + */ +message LossLayer { + + string name = 1; + oneof LossLayerType { + + CategoricalCrossEntropyLossLayer categoricalCrossEntropyLossLayer = 10; + MeanSquaredErrorLossLayer meanSquaredErrorLossLayer = 11; + + } + +} + +/* + * Categorical cross entropy loss layer + * Categorical cross entropy is used for single label categorization (only one category is applicable for each data point). + * + * The input is a vector of length N representing the distribution over N categories. It must be the output of a softmax. + * + * The target is a single value representing the true category or class label. If the target is the predictedFeatureName of a neural network classifier it will be inverse mapped to the corresponding categorical index for you. 
+ * + * math: + * Loss_{CCE}(input, target) = -\sum_{i=1}^{N} (target == i) log( input[i] ) = - log (input[target]) + */ +message CategoricalCrossEntropyLossLayer { + + string input = 1; + string target = 2; + +} + +/* + * Mean squared error loss layer, + * specifying input and target + */ +message MeanSquaredErrorLossLayer { + + string input = 1; + string target = 2; + +} + +/* + * Optimizer - stochastic gradient descent and adam are the only supported optimizers currently + */ +message Optimizer { + + oneof OptimizerType { + + SGDOptimizer sgdOptimizer = 10; + AdamOptimizer adamOptimizer = 11; + + } + +} + +/* + * Stochastic gradient descent optimizer, + * specifying configurable learning rate, mini batch size, and momentum + */ +message SGDOptimizer { + + DoubleParameter learningRate = 1; + Int64Parameter miniBatchSize = 2; + DoubleParameter momentum = 3; + +} + +/* + * Adam optimizer, + * specifying configurable learning rate, mini batch size, betas, and eps + */ +message AdamOptimizer { + + DoubleParameter learningRate = 1; + Int64Parameter miniBatchSize = 2; + DoubleParameter beta1 = 3; + DoubleParameter beta2 = 4; + DoubleParameter eps = 5; + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/NonMaximumSuppression.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/NonMaximumSuppression.proto new file mode 100644 index 000000000..047f74bdb --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/NonMaximumSuppression.proto @@ -0,0 +1,187 @@ +// Copyright (c) 2018, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* +* Non-maximum suppression of axis-aligned bounding boxes. 
+* +* This is used primarily for object detectors that tend to produce multiple +* boxes around a single object. This is a byproduct of the detector's +* robustness to spatial translation. If there are two or more bounding boxes +* that are very similar to one another, the algorithm should return only a +* single representative. +* +* Similarity between two bounding boxes is measured by intersection-over-union +* (IOU), the fraction between the area of intersection and area of the union. +* Here is an example where the areas can be calculated by hand by counting glyphs:: +* +* +-------+ +-------+ +* | | | | +* | +------+ +--+ | +---+ +* | | | | | | | | +* +-------+ | +--+ +----+ | +* | | | | +* +------+ +------+ +* Intersection Union +* IOU: 0.16 = 12 / 73 +* +* All IOU scores are fractions between 0.0 (fully disjoint) and 1.0 (perfect +* overlap). The standard algorithm (PickTop) is defined as follows: +* +* 1. Sort boxes by descending order of confidence +* 2. Take the top one and mark it as keep +* 3. Suppress (mark it as discard) all boxes within a fixed IOU radius of the +* keep box +* 4. Go to 2 and repeat on the subset of boxes not already kept or discarded +* 5. When all boxes are processed, output only the ones marked as keep +* +* Before the algorithm, boxes that fall below the confidence threshold are +* discarded. +*/ +message NonMaximumSuppression { + // Suppression methods: + /* + * Pick the bounding box of the top confidence, suppress all within a radius. + */ + message PickTop { + /* + * Suppression is only done among predictions with the same label + * (argmax of the confidence). + */ + bool perClass = 1; + } + + /* + * Choose which underlying suppression method to use + */ + oneof SuppressionMethod { + PickTop pickTop = 1; + } + + /* + * Optional class label mapping. + */ + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } + + /* + * This defines the radius of suppression. 
A box is considered to be within + * the radius of another box if their IOU score is less than this value. + */ + double iouThreshold = 110; + + /* + * Remove bounding boxes below this threshold. The algorithm run-time is + * proportional to the square of the number of incoming bounding boxes + * (O(N^2)). This threshold is a way to reduce N to make the algorithm + * faster. The confidence threshold can be any non-negative value. Negative + * confidences are not allowed, since if the output shape is specified to be + * larger than boxes after suppression, the unused boxes are filled with + * zero confidence. If the prediction is handled by Core Vision, it is also + * important that confidences are defined with the following semantics: + * + * 1. Confidences should be between 0 and 1 + * 2. The sum of the confidences for a prediction should not exceed 1, but is + * allowed to be less than 1 + * 3. The sum of the confidences will be interpreted as the confidence of + * any object (e.g. if the confidences for two classes are 0.2 and 0.4, + it means there is a 60% (0.2 + 0.4) confidence that an object is + present) + */ + double confidenceThreshold = 111; + + /* + * Set the name of the confidence input. + * + * The input should be a multi-array of type double and shape N x C. N is + * the number of boxes and C the number of classes. Each row describes the + * confidences of each object category being present at that particular + * location. Confidences should be nonnegative, where 0.0 means the highest + * certainty the object is not present. + * + * Specifying shape is optional. + */ + string confidenceInputFeatureName = 200; + + /* + * Set the name of the coordinates input. + * + * The input should be a multi-array of type double and shape N x 4. The + * rows correspond to the rows of the confidence matrix. 
The four values + * describe (in order): + * + * - x (center location of the box along the horizontal axis) + * - y (center location of the box along the vertical axis) + * - width (size of box along the horizontal axis) + * - height (size of box on along the vertical axis) + * + * Specifying shape is optional. + */ + string coordinatesInputFeatureName = 201; + + /* + * The iouThreshold can be optionally overridden by specifying this string + * and providing a corresponding input of type double. This allows changing + * the value of the parameter during run-time. + * + * The input should be a scalar double between 0.0 and 1.0. Setting it to 1.0 + * means there will be no suppression based on IOU. + */ + string iouThresholdInputFeatureName = 202; + + /* + * The confidenceThreshold can be optionally overridden by specifying this + * string and providing a corresponding input. This allows changing the + * value of the parameter during run-time, which can aid setting it just + * right for a particular use case. + * + * The input should be a scalar double with nonnegative value. + */ + string confidenceThresholdInputFeatureName = 203; + + /* + * Set the name of the confidence output. The output will be the same type + * and shape as the corresponding input. The only difference is that the + * number of rows may have been reduced. + * + * Specifying shape is optional. One reason to specify shape is to limit + * the number of output boxes. This can be done is several ways: + * + * Fixed shape: + * The output can be pinned to a fixed set of boxes. If this number is larger + * than the number of boxes that would have been returned, the output is padded + * with zeros for both confidence and coordinates. Specifying a fixed shape + * can be done by setting either shape (deprecated) or allowedShapes set to + * fixedsize. + * + * Min/max: + * It is also possible to set both a minimum and a maximum. The same zero-padding + * as for fixed shape is applied when necessary. 
Setting min/max is done by defining + * two allowedShapes, where the first dimension uses a rangeofsizes defining lowerbound + * and upperbound. + */ + string confidenceOutputFeatureName = 210; + + /* + * Set the name of the coordinates output. The output will be the same type + * and shape as the corresponding input. The only difference is that the + * number of rows may have been reduced. + * + * Specifying shape is optional. See confidence output for a more detailed + * description. Note that to achieve either fixed shape output or a + * constraint range of boxes, only one of confidence or coordinates need to + * set a shape. Both shapes are allowed to be defined, but in such case they + * have to be consistent along dimension 0. + */ + string coordinatesOutputFeatureName = 211; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Normalizer.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Normalizer.proto new file mode 100644 index 000000000..0967bbf0a --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Normalizer.proto @@ -0,0 +1,38 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A normalization preprocessor. + */ +message Normalizer { + /* + * There are three normalization modes, + * which have the corresponding formulas: + * + * Max + * .. math:: + * max(x_i) + * + * L1 + * .. math:: + * z = ||x||_1 = \sum_{i=1}^{n} |x_i| + * + * L2 + * .. 
math:: + * z = ||x||_2 = \sqrt{\sum_{i=1}^{n} x_i^2} + */ + enum NormType { + LMax = 0; + L1 = 1; + L2 = 2; + } + + NormType normType = 1; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/OneHotEncoder.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/OneHotEncoder.proto new file mode 100644 index 000000000..417639908 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/OneHotEncoder.proto @@ -0,0 +1,41 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * Transforms a categorical feature into an array. The array will be all + * zeros expect a single entry of one. + * + * Each categorical value will map to an index, this mapping is given by + * either the ``stringCategories`` parameter or the ``int64Categories`` + * parameter. + */ +message OneHotEncoder { + enum HandleUnknown { + ErrorOnUnknown = 0; + IgnoreUnknown = 1; // Output will be all zeros for unknown values. + } + + /* + * Mapping to be used for the encoding. The position of the category in + * the below vector determines where the single one entry will be in the + * output. + */ + oneof CategoryType { + StringVector stringCategories = 1; + Int64Vector int64Categories = 2; + } + + // Output can be a dictionary with only one entry, instead of an array. + bool outputSparse = 10; + + HandleUnknown handleUnknown = 11; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Parameters.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Parameters.proto new file mode 100644 index 000000000..044b2a95a --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Parameters.proto @@ -0,0 +1,52 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * Int64 parameter, + * consisting of a default int64 value, and allowed range or set of values + * value is unbounded if AllowedValues is not set. + */ +message Int64Parameter { + int64 defaultValue = 1; + oneof AllowedValues { + Int64Range range = 10; + Int64Set set = 11; + } +} + +/* + * Double parameter, + * consisting of a default double value, and allowed range of values + * value is unbounded if AllowedValues is not set. + */ +message DoubleParameter { + double defaultValue = 1; + oneof AllowedValues { + DoubleRange range = 10; + } +} + +/* + * String parameter, + * A default string value must be provided + */ +message StringParameter { + string defaultValue = 1; +} + +/* + * String parameter, + * A default bool value must be provided + */ +message BoolParameter { + bool defaultValue = 1; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/SVM.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/SVM.proto new file mode 100644 index 000000000..d900e9aca --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/SVM.proto @@ -0,0 +1,195 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +// Kernel Definitions +// ------------------ + +/* + * A linear kernel. + * + * This function has the following formula: + * + * .. 
math:: + * K(\boldsymbol{x}, \boldsymbol{x'}) = \boldsymbol{x}^T \boldsymbol{x'} + */ +message LinearKernel { +} + +/* + * A Gaussian radial basis function (RBF) kernel. + * + * This function has the following formula: + * + * .. math:: + * K(\boldsymbol{x}, \boldsymbol{x'}) = \ + * \exp(-\gamma || \boldsymbol{x} - \boldsymbol{x'} ||^2 ) + * + */ +message RBFKernel { + double gamma = 1; +} + +/* + * A polynomial kernel. + * + * This function has the following formula: + * + * .. math:: + * K(\boldsymbol{x}, \boldsymbol{x'}) = \ + * (\gamma \boldsymbol{x}^T \boldsymbol{x'} + c)^{degree} + */ +message PolyKernel { + int32 degree = 1; + double c = 2; + double gamma = 3; +} + +/* + * A sigmoid kernel. + * + * This function has the following formula: + * + * .. math:: + * K(\boldsymbol{x}, \boldsymbol{x'}) = \ + * \tanh(\gamma \boldsymbol{x}^T \boldsymbol{x'} + c) + */ +message SigmoidKernel { + double gamma = 1; + double c = 2; +} + +/* + * A kernel. + */ +message Kernel { + oneof kernel { + LinearKernel linearKernel = 1; + RBFKernel rbfKernel = 2; + PolyKernel polyKernel = 3; + SigmoidKernel sigmoidKernel = 4; + } +} + + +// Support Vector Definitions +// -------------------------- + +/* + * A sparse node. + */ +message SparseNode { + int32 index = 1; // 1-based indexes, like libsvm + double value = 2; +} + +/* + * A sparse vector. + */ +message SparseVector { + repeated SparseNode nodes = 1; +} + +/* + * One or more sparse support vectors. + */ +message SparseSupportVectors { + repeated SparseVector vectors = 1; +} + +/* + * A dense vector. + */ +message DenseVector { + repeated double values = 1; +} + +/* + * One or more dense support vectors. + */ +message DenseSupportVectors { + repeated DenseVector vectors = 1; +} + +/* + * One or more coefficients. + */ +message Coefficients { + repeated double alpha = 1; +} + +/* + * A support vector regressor. 
+ */ +message SupportVectorRegressor { + Kernel kernel = 1; + + // Support vectors, either sparse or dense format + oneof supportVectors { + SparseSupportVectors sparseSupportVectors = 2; + DenseSupportVectors denseSupportVectors = 3; + } + + // Coefficients, one for each support vector + Coefficients coefficients = 4; + + double rho = 5; +} + +/* + * A support vector classifier + */ +message SupportVectorClassifier { + Kernel kernel = 1; + + /* + * The number of support vectors for each class. + */ + repeated int32 numberOfSupportVectorsPerClass = 2; + + /* + * The support vectors, in either sparse or dense format. + */ + oneof supportVectors { + SparseSupportVectors sparseSupportVectors = 3; + DenseSupportVectors denseSupportVectors = 4; + } + + /* + * The coefficients, essentially a two dimensional array of + * size: (numberOfClasses-1) by (total number of support vectors) + */ + repeated Coefficients coefficients = 5; + + /* + * Constants for decision function, + * with K*(K-1) / 2 elements, + * where K is the number of classes. + */ + repeated double rho = 6; + + /* + * Pairwise probability information for A vs B classifier. + * Total of K*(K-1)/2 elements where K is the number of classes. + * These fields are optional, + * and only required if you want probabilities or multi class predictions. + */ + repeated double probA = 7; + repeated double probB = 8; + + /* + * Class label mapping. + */ + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/Scaler.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/Scaler.proto new file mode 100644 index 000000000..2b389d29a --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/Scaler.proto @@ -0,0 +1,34 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification; + +/* + * A scaling operation. + * + * This function has the following formula: + * + * .. math:: + * f(x) = scaleValue \cdot (x + shiftValue) + * + * If the ``scaleValue`` is not given, the default value 1 is used. + * If the ``shiftValue`` is not given, the default value 0 is used. + * + * If ``scaleValue`` and ``shiftValue`` are each a single value + * and the input is an array, then the scale and shift are applied + * to each element of the array. + * + * If the input is an integer, then it is converted to a double to + * perform the scaling operation. If the output type is an integer, + * then it is cast to an integer. If that cast is lossy, then an + * error is generated. + */ +message Scaler { + repeated double shiftValue = 1; + repeated double scaleValue = 2; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/SoundAnalysisPreprocessing.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/SoundAnalysisPreprocessing.proto new file mode 100644 index 000000000..b08957e97 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/SoundAnalysisPreprocessing.proto @@ -0,0 +1,60 @@ +// Copyright (c) 2019, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which takes audio signal samples as input and outputs an array of + * preprocessed samples according to the specified preprocessing types + */ +message SoundAnalysisPreprocessing { + + // Specific preprocessing types for sound analysis + + /* Vggish preprocesses input audio samples and makes them ready to + be fed to Vggish feature extractor. + c.f. https://arxiv.org/pdf/1609.09430.pdf + + The preprocessing takes input a single channel (monophonic) audio samples + 975 milliseconds long, sampled at 16KHz, i.e., 15600 samples 1D multiarray + and produces preprocessed samples in multiarray of shape [1, 96, 64] + + (1) Splits the input audio samples into overlapping frames, where each + frame is 25 milliseconds long and hops forward by 10 milliseconds. + Any partial frames at the end are dropped. + + (2) Hann window: apply a periodic Hann with a window_length of + 25 milliseconds, which translates to 400 samples in 16KHz sampling rate + + w(n) = 0.5 - 0.5 * cos(2*pi*n/window_length_sample), + where 0 <= n <= window_lenth_samples - 1 and window_lenth_samples = 400 + + Then, the Hann window is applied to each frame as below + + windowed_frame(n) = frame(n) * w(n) + where 0 <= n <= window_lenth_samples - 1 and window_lenth_samples = 400 + + (3) Power spectrum: calculate short-time Fourier transfor magnitude, with + an FFT length of 512 + + (4) Log Mel filter bank: calculates a log magnitude mel-frequency + spectrogram minimum frequency of 125Hz and maximum frequency of 7500Hz, + number of mel bins is 64, log_offset is 0.01, number of spectrum bins + is 64. 
+ */ + + message Vggish { + // no specific parameter + } + + // Vision feature print type + oneof SoundAnalysisPreprocessingType { + Vggish vggish = 20; + } + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/TextClassifier.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/TextClassifier.proto new file mode 100644 index 000000000..d31113fda --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/TextClassifier.proto @@ -0,0 +1,43 @@ +// Copyright (c) 2018, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which takes a single input string and outputs a + * label for the input. + */ +message TextClassifier { + + /* + * Stores the resivion number for the model, revision 1 is available on + * iOS, tvOS 12.0+, macoOS 10.14+ + */ + uint32 revision = 1; + + /* + * Stores the language of the model, as specified in BCP-47 format, + * e.g. "en-US". See https://tools.ietf.org/html/bcp47 + */ + string language = 10; + + /* + * Stores the byte representation of learned model parameters + */ + bytes modelParameterData = 100; + + /* + * Stores the set of output class labels + */ + oneof ClassLabels { + StringVector stringClassLabels = 200; + } + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/TreeEnsemble.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/TreeEnsemble.proto new file mode 100644 index 000000000..6428dc730 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/TreeEnsemble.proto @@ -0,0 +1,161 @@ +// Copyright (c) 2017, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +/* + * Each tree is a collection of nodes, + * each of which is identified by a unique identifier. + * + * Each node is either a branch or a leaf node. + * A branch node evaluates a value according to a behavior; + * if true, the node identified by ``true_child_node_id`` is evaluated next, + * if false, the node identified by ``false_child_node_id`` is evaluated next. + * A leaf node adds the evaluation value to the base prediction value + * to get the final prediction. + * + * A tree must have exactly one root node, + * which has no parent node. + * A tree must not terminate on a branch node. + * All leaf nodes must be accessible + * by evaluating one or more branch nodes in sequence, + * starting from the root node. + */ + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification; + +/* + * A tree ensemble post-evaluation transform. + */ +enum TreeEnsemblePostEvaluationTransform { + NoTransform = 0; + Classification_SoftMax = 1; + Regression_Logistic = 2; + Classification_SoftMaxWithZeroClassReference = 3; +} + +/* + * Tree ensemble parameters. + */ +message TreeEnsembleParameters { + message TreeNode { + uint64 treeId = 1; + uint64 nodeId = 2; + + enum TreeNodeBehavior { + BranchOnValueLessThanEqual = 0; + BranchOnValueLessThan = 1; + BranchOnValueGreaterThanEqual = 2; + BranchOnValueGreaterThan = 3; + BranchOnValueEqual = 4; + BranchOnValueNotEqual = 5; + LeafNode = 6; + } + + /* + * The branch mode parameters. + * + * If branch is false, + * then the parameters in this section must be filled in + * to determine how the branching functions. + */ + TreeNodeBehavior nodeBehavior = 3; + + /* + * If the node behavior mode is a branch mode, + * then these values must be filled in. 
+ */ + uint64 branchFeatureIndex = 10; + double branchFeatureValue = 11; + uint64 trueChildNodeId = 12; + uint64 falseChildNodeId = 13; + bool missingValueTracksTrueChild = 14; + + /* + * The leaf mode. + * + * If ``nodeBahavior`` == ``LeafNode``, + * then the evaluationValue is added to the base prediction value + * in order to get the final prediction. + * To support multiclass classification + * as well as regression and binary classification, + * the evaluation value is encoded here as a sparse vector, + * with evaluationIndex being the index of the base vector + * that evaluation value is added to. + * In the single class case, + * it is expected that evaluationIndex is exactly 0. + */ + message EvaluationInfo { + uint64 evaluationIndex = 1; + double evaluationValue = 2; + } + + repeated EvaluationInfo evaluationInfo = 20; + + /* + * The relative hit rate of a node for optimization purposes. + * + * This value has no effect on the accuracy of the result; + * it allows the tree to optimize for frequent branches. + * The value is relative, + * compared to the hit rates of other branch nodes. + * + * You typically use a proportion of training samples + * that reached this node + * or some similar metric to derive this value. + */ + double relativeHitRate = 30; + } + + repeated TreeNode nodes = 1; + + /* + * The number of prediction dimensions or classes in the model. + * + * All instances of ``evaluationIndex`` in a leaf node + * must be less than this value, + * and the number of values in the ``basePredictionValue`` field + * must be equal to this value. + * + * For regression, + * this is the dimension of the prediction. + * For classification, + * this is the number of classes. + */ + uint64 numPredictionDimensions = 2; + + /* + * The base prediction value. + * + * The number of values in this must match + * the default values of the tree model. + */ + repeated double basePredictionValue = 3; +} + +/* + * A tree ensemble classifier. 
+ */ +message TreeEnsembleClassifier { + TreeEnsembleParameters treeEnsemble = 1; + TreeEnsemblePostEvaluationTransform postEvaluationTransform = 2; + + // Required class label mapping + oneof ClassLabels { + StringVector stringClassLabels = 100; + Int64Vector int64ClassLabels = 101; + } +} + +/* + * A tree ensemble regressor. + */ +message TreeEnsembleRegressor { + TreeEnsembleParameters treeEnsemble = 1; + TreeEnsemblePostEvaluationTransform postEvaluationTransform = 2; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/VisionFeaturePrint.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/VisionFeaturePrint.proto new file mode 100644 index 000000000..a87fdd40f --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/VisionFeaturePrint.proto @@ -0,0 +1,67 @@ +// Copyright (c) 2018, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which takes an input image and outputs array(s) of features + * according to the specified feature types + */ +message VisionFeaturePrint { + + // Specific vision feature print types + + // Scene extracts features useful for identifying contents of natural images + // in both indoor and outdoor environments + message Scene { + enum SceneVersion { + SCENE_VERSION_INVALID = 0; + // VERSION_1 is available on iOS,tvOS 12.0+, macOS 10.14+ + // It uses a 299x299 input image and yields a 2048 float feature vector + SCENE_VERSION_1 = 1; + + // VERSION_2 is available on iOS,tvOS 17.0+, macOS 14.0+ + // It uses a 360x360 input image and yields a 768 float feature vector + SCENE_VERSION_2 = 2; + } + + SceneVersion version = 1; + } + + // Objects extracts features useful for identifying and localizing + // objects in natural images + message Objects { + enum 
ObjectsVersion { + OBJECTS_VERSION_INVALID = 0; + // VERSION_1 is available on iOS,tvOS 14.0+, macOS 11.0+ + // It uses a 299x299 input image and yields two multiarray + // features: one at high resolution of shape (288, 35, 35) + // the other at low resolution of shape (768, 17, 17) + OBJECTS_VERSION_1 = 1; + } + + ObjectsVersion version = 1; + + /* + * Stores the names of the output features according to the + * order of them being computed from the neural network, i.e., + * the first element in the output is the earliest being + * computed, while the last is the latest being computed. In + * general, the order reflects the resolution of the feature. + * The earlier it is computed, the higher the feature resolution. + */ + repeated string output = 100; + } + + // Vision feature print type + oneof VisionFeaturePrintType { + Scene scene = 20; + Objects objects = 21; + } + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/WordEmbedding.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/WordEmbedding.proto new file mode 100644 index 000000000..349a068d9 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/WordEmbedding.proto @@ -0,0 +1,35 @@ +// Copyright (c) 2019, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which maps a set of strings into a finite-dimensional real vector space. + */ +message WordEmbedding { + + /* + * Stores the revision number for the model, revision 2 is available on + * iOS, tvOS 13.0+, macOS 10.15+ + */ + uint32 revision = 1; + + /* + * Stores the language of the model, as specified in BCP-47 format, + * e.g. "en-US". 
See https://tools.ietf.org/html/bcp47 + */ + string language = 10; + + /* + * Stores efficient representation of emebedding as encoded by the Natural Language Framework + */ + bytes modelParameterData = 100; + +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/format/WordTagger.proto b/cpp/external/katagocoreml/vendor/mlmodel/format/WordTagger.proto new file mode 100644 index 000000000..c8452a4ec --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/format/WordTagger.proto @@ -0,0 +1,75 @@ +// Copyright (c) 2018, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in LICENSE.txt or at https://opensource.org/licenses/BSD-3-Clause + +syntax = "proto3"; +option optimize_for = LITE_RUNTIME; + +import public "DataStructures.proto"; + +package CoreML.Specification.CoreMLModels; + +/* + * A model which takes a single input string and outputs a + * sequence of tokens, tags for tokens, along with their + * locations and lengths, in the original string. + */ +message WordTagger { + + /* + * Stores the resivion number for the model, revision 1 is available on + * iOS, tvOS 12.0+, macoOS 10.14+ + */ + uint32 revision = 1; + + /* + * Stores the language of the model, as specified in BCP-47 format, + * e.g. "en-US". See https://tools.ietf.org/html/bcp47 + */ + string language = 10; + + /* + * Stores the name of tokens output. The output will be + * a sequence of strings that contains the tokens in the + * input string + */ + string tokensOutputFeatureName = 20; + + /* + * Stores the name of token tags output. The output will be + * a sequence of strings that contains the tags for each + * token in the input string + */ + string tokenTagsOutputFeatureName = 21; + + /* + * Stores the name of token locations output. 
The output will be + * a sequence of integers that contains the locations (indices) + * for each token in the input string, location starts from 0 + */ + string tokenLocationsOutputFeatureName = 22; + + /* + * Stores the name of token lengths output. The output will be + * a sequence of integers that contains the lengths for each + * token in the input string + */ + string tokenLengthsOutputFeatureName = 23; + + /* + * Stores the byte representation of learned model parameters + */ + bytes modelParameterData = 100; + + /* + * Stores the set of output tags + */ + oneof Tags { + StringVector stringTags = 200; + } + + + +} + diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Bf16.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Bf16.hpp new file mode 100644 index 000000000..125e59c45 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Bf16.hpp @@ -0,0 +1,57 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include +#include + +namespace MILBlob { + +/** + * Struct for holding bytes that represent a bf16 number. + * Floating point interface treats "bytes" as brain float16 floating point + * (https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) + */ +struct Bf16 { + explicit Bf16(uint16_t bs) : bytes(bs) {} + Bf16() : bytes(0) {} + + static Bf16 FromFloat(float f); + + float GetFloat() const; + void SetFloat(float f); + + // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes) + uint16_t bytes; +}; + +inline bool operator==(const Bf16& first, const Bf16& second) noexcept +{ + // Note this comparison is quick and dirty - it will give incorrect results + // for (-0.0 == 0.0) and, depending on bit pattern, (NaN == NaN). 
+ return first.bytes == second.bytes; +} + +inline bool operator!=(const Bf16& first, const Bf16& second) noexcept +{ + // Note this comparison is quick and dirty - it will give incorrect results + // for (-0.0 != 0.0) and, depending on bit pattern, (NaN != NaN). + return first.bytes != second.bytes; +} + +} // namespace MILBlob + +namespace std { + +template <> +struct hash { + size_t operator()(const MILBlob::Bf16& fp) const + { + return fp.bytes; + } +}; + +} // namespace std diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/BlobDataType.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/BlobDataType.hpp new file mode 100644 index 000000000..4dee4cc06 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/BlobDataType.hpp @@ -0,0 +1,131 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Bf16.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Fp8.hpp" +#include "MILBlob/SubByteTypes.hpp" + +namespace MILBlob { +namespace Blob { + +enum class BlobDataType : uint32_t { + // *** WARNING *** + // For binary compatibility, values should ONLY be added at the end. + // + // this file needs to remain in sync across multiple repos. + // please be cognizant of that when making changes to the + // format. 
+ Float16 = 1, + Float32 = 2, + UInt8 = 3, + Int8 = 4, + BFloat16 = 5, + Int16 = 6, + UInt16 = 7, + Int4 = 8, + UInt1 = 9, + UInt2 = 10, + UInt4 = 11, + UInt3 = 12, + UInt6 = 13, + Int32 = 14, + UInt32 = 15, + Float8E4M3FN = 16, + Float8E5M2 = 17, +}; + +template +struct BlobDataTypeTraits; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Float32; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Float16; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Float8E4M3FN; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Float8E5M2; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::BFloat16; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt8; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Int8; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Int16; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt16; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Int32; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt32; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::Int4; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt6; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt4; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt3; +}; + 
+template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt2; +}; + +template <> +struct BlobDataTypeTraits { + static constexpr BlobDataType DataType = BlobDataType::UInt1; +}; + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.cpp new file mode 100644 index 000000000..f30e7352f --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.cpp @@ -0,0 +1,94 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Blob/FileWriter.hpp" +#include "MILBlob/Blob/StorageFormat.hpp" + +#include +#include +#include +#include + +using namespace MILBlob; +using namespace MILBlob::Blob; +using namespace MILBlob::Util; + +namespace { +std::ios_base::openmode GetWriterMode(bool truncate) +{ + std::ios_base::openmode result = (std::ios::in | std::ios::out | std::ios::binary); + if (truncate) { + result |= std::ios::trunc; + } + return result; +} +} // anonymous namespace + +FileWriter::~FileWriter() = default; + +FileWriter::FileWriter(const std::string& filePath, bool truncateFile) +{ + m_fileStream.open(filePath, GetWriterMode(truncateFile)); + if (!m_fileStream) { + // If file does not exists, ios::in does not create one + // Let's create a file and re-open with required flags + m_fileStream.open(filePath, std::ofstream::binary | std::ios::out); + m_fileStream.close(); + m_fileStream.open(filePath, GetWriterMode(truncateFile)); + } + MILVerifyIsTrue(m_fileStream, + std::runtime_error, + "[MIL FileWriter]: Unable to open " + filePath + " file stream for writing"); +} + +uint64_t FileWriter::GetNextAlignedOffset() +{ + m_fileStream.seekg(0, std::ios::end); + uint64_t 
offset = static_cast(m_fileStream.tellg()); + if (offset % DefaultStorageAlignment == 0) { + return offset; + } + auto pad = DefaultStorageAlignment - (offset % DefaultStorageAlignment); + return offset + pad; +} + +uint64_t FileWriter::GetFileSize() +{ + m_fileStream.seekg(0, std::ios::end); + return static_cast(m_fileStream.tellg()); +} + +uint64_t FileWriter::AppendData(Span data) +{ + auto offset = GetNextAlignedOffset(); + m_fileStream.seekp(static_cast(offset), std::ios::beg); + m_fileStream.write(reinterpret_cast(data.Data()), static_cast(data.Size())); + MILVerifyIsTrue(m_fileStream.good(), + std::runtime_error, + "[MIL FileWriter]: Unknown error occurred while writing data to the file."); + return offset; +} + +void FileWriter::WriteData(Span data, uint64_t offset) +{ + MILVerifyIsTrue(offset % DefaultStorageAlignment == 0, + std::runtime_error, + "[MIL FileWriter]: Provided offset not aligned. offset=" + std::to_string(offset) + + " alignment=" + std::to_string(DefaultStorageAlignment) + "."); + m_fileStream.seekp(static_cast(offset), std::ios::beg); + m_fileStream.write(reinterpret_cast(data.Data()), static_cast(data.Size())); + MILVerifyIsTrue(m_fileStream.good(), + std::runtime_error, + "[MIL FileWriter]: Unknown error occurred while writing data to the file."); +} + +void FileWriter::ReadData(uint64_t offset, Util::Span destData) +{ + m_fileStream.seekg(static_cast(offset), std::ios::beg); + m_fileStream.read(reinterpret_cast(destData.Data()), static_cast(destData.Size())); + MILVerifyIsTrue(m_fileStream.good(), + std::runtime_error, + "[MIL FileWriter]: Unknown error occurred while reading data from the file."); +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.hpp new file mode 100644 index 000000000..2bc994033 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/FileWriter.hpp @@ -0,0 +1,63 @@ +// Copyright (c) 
2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Util/Span.hpp" + +#include +#include +#include + +namespace MILBlob { +namespace Blob { +/** + * Utility for interfacing with files + */ +class FileWriter final { +public: + FileWriter() = delete; + FileWriter(const FileWriter&) = delete; + FileWriter(FileWriter&&) = delete; + FileWriter& operator=(const FileWriter&) = delete; + FileWriter& operator=(FileWriter&&) = delete; + + FileWriter(const std::string& filePath, bool truncateFile); + ~FileWriter(); + + /** + * Appends given data to file at next aligned offset + * @throws std::runtime_error if error occurs while writing to file stream + */ + uint64_t AppendData(Util::Span data); + + /** + * Writes data to given offset + * @throws std::runtime_error if error occurs while writing to file stream or offset is not aligned + */ + void WriteData(Util::Span data, uint64_t offset); + + /** + * Returns next available aligned offset for writing + */ + uint64_t GetNextAlignedOffset(); + + /** + * Returns size in byte of file currently open + */ + uint64_t GetFileSize(); + + /** + * Reads data from current stream from given offset and writes into destData + * @throws std:runtime_error if error occurs during reading data + */ + void ReadData(uint64_t offset, Util::Span destData); + +private: + std::fstream m_fileStream; +}; + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.cpp new file mode 100644 index 000000000..e773c98e6 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.cpp @@ -0,0 +1,62 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Blob/MMapFileReader.hpp" + +#include +#include +#include + +using namespace MILBlob; +using namespace MILBlob::Blob; + +MMapFileReader::~MMapFileReader() = default; + +MMapFileReader::MMapFileReader(const std::string& filename) : m_isEncrypted(false) +{ + // verify file exists and find its length + struct stat fileInfo; + if (stat(filename.c_str(), &fileInfo) != 0) { + throw std::runtime_error("Could not open " + filename); + } + + // mmap works in size_t units to be compatible with virtual address space units + auto fileLength = static_cast(fileInfo.st_size); + + // wrap fopen/fclose in exception-safe type + std::unique_ptr f(fopen(filename.c_str(), "r"), fclose); + + MILVerifyIsTrue(f != nullptr, std::runtime_error, "Unable to read file " + filename); + + // wrap mmap/munmap in exception-safe type + std::unique_ptr> mmapPtr( + mmap(nullptr, fileLength, PROT_READ, MAP_PRIVATE, fileno(f.get()), 0 /*offset*/), + [length = fileLength](void* ptr) { munmap(ptr, length); }); + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-cstyle-cast) -- MAP_FAILED is (void*) -1. 
+ MILVerifyIsTrue(mmapPtr.get() != nullptr && mmapPtr.get() != MAP_FAILED, + std::runtime_error, + "Unable to mmap file " + filename); + + m_dataSpan = Util::Span(reinterpret_cast(mmapPtr.get()), fileLength); + + // Keep mmaping alive + m_mmap = std::move(mmapPtr); +} + +uint64_t MMapFileReader::GetLength() const +{ + return m_dataSpan.Size(); +} + +Util::Span MMapFileReader::ReadData(uint64_t offset, uint64_t length) const +{ + return m_dataSpan.Slice(static_cast(offset), static_cast(length)); +} + +bool MMapFileReader::IsEncrypted() const +{ + return m_isEncrypted; +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.hpp new file mode 100644 index 000000000..3a4f72522 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReader.hpp @@ -0,0 +1,67 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Util/Span.hpp" + +#include +#include +#include + +namespace MILBlob { +namespace Blob { + +/** + * Memory-mapped file reader. + */ +class MMapFileReader { +public: + MMapFileReader() = delete; + MMapFileReader(const MMapFileReader&) = delete; + MMapFileReader(MMapFileReader&&) = delete; + MMapFileReader& operator=(const MMapFileReader&) = delete; + MMapFileReader& operator=(MMapFileReader&&) = delete; + + /** + * Maps the file specified into virtual memory space. + * @throws std::runtime_error if the file cannot be loaded or mapping fails. + */ + MMapFileReader(const std::string& filename); + + /** Unmaps the loaded file from virtual memory space. */ + ~MMapFileReader(); + + uint64_t GetLength() const; + + /** + * Provides a read-only Span of bytes at the requested offset and length. 
+ * @throws std::range_error if offset or length are invalid. + */ + Util::Span ReadData(uint64_t offset, uint64_t length) const; + + /** + * Interprets mapped data as a C++ struct at the provided offset. + */ + template + const T& ReadStruct(uint64_t offset) const + { + auto region = ReadData(offset, sizeof(T)); + return *reinterpret_cast(region.Data()); + } + + /** Returns true if the underlying file is encrypted. */ + bool IsEncrypted() const; + +protected: + std::unique_ptr> m_mmap; + + Util::Span m_dataSpan; + + bool m_isEncrypted; +}; + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.cpp new file mode 100644 index 000000000..fe932d37f --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.cpp @@ -0,0 +1,16 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Blob/MMapFileReader.hpp" +#include "MILBlob/Blob/MMapFileReaderFactory.hpp" + +namespace MILBlob::Blob { + +std::unique_ptr MakeMMapFileReader(const std::string& filePath) +{ + return std::make_unique(filePath); +} + +} // namespace MILBlob::Blob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.hpp new file mode 100644 index 000000000..4c031ac96 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/MMapFileReaderFactory.hpp @@ -0,0 +1,19 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include + +namespace MILBlob::Blob { + +class MMapFileReader; + +/** + * MakeMMapFileReader: Returns MMapedFileReader for file present at given filePath + */ +std::unique_ptr MakeMMapFileReader(const std::string& filePath); + +} // namespace MILBlob::Blob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageFormat.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageFormat.hpp new file mode 100644 index 000000000..135669b97 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageFormat.hpp @@ -0,0 +1,92 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Blob/BlobDataType.hpp" + +namespace MILBlob { +namespace Blob { + +// +// ---: Blob Storage File Format :--- +// Default file format for CoreML (iOS15 onwards) +// +// ---: File sturcture :--- +// File is structured as below: +// 1. Storage header: `struct storage_header` +// 2. Followed by pair: `struct blob_metadata` and `raw_data` +// Each entry i.e. blob_metadata and raw data is 64 bytes aligned. +// +// Example file structure: +// ||||...|||| +// +// Example (file structure and associated mil_program usage): +// |storage_header>|,|...|,| // file structure +// | |64 ,128 | |256 ,320 | // byte offset +// +// Example usage in MIL program: +// a = const(BlobFile(file_path="weights/file.wt", offset=64)) +// b = const(BlobFile(file_path="weights/file.wt", offset=256)) +// +// Reference: https://quip-apple.com/V5zFA91jmjL3 +// + +// Default alignment being used for reading-writing Blob Storage format. 
+constexpr uint64_t DefaultStorageAlignment = 64; +// Default sentinel for validation for metadata +constexpr uint64_t BlobMetadataSentinel = 0xDEADBEEF; + +/** + * blob_metadata: stores information of blob present in weight file + * + * Before ios18, the reserved fields were uninitialized and could have any values if not specified. + * From ios18 on, the reserved fields are initialized to 0 by default. + * To extend the format, make sure to bump the version number in storage_header. + */ +struct blob_metadata { + uint32_t sentinel = BlobMetadataSentinel; // for validating correctness of metadata. + + BlobDataType mil_dtype = BlobDataType::Float16; // data type of the blob data. + uint64_t sizeInBytes = 0; // size of the blob data in bytes. + uint64_t offset = 0; // offset in file for blob data. + uint64_t padding_size_in_bits = 0; // describes the number of unused bits in this blob, + // required to calculate the actual size for spans of + // sub-btye-sized types. Unused otherwise + // Reserve fields + uint64_t reserved_1 = 0; + uint64_t reserved_2 = 0; + uint64_t reserved_3 = 0; + uint64_t reserved_4 = 0; +}; + +/** + * storage_header: Header for MIL Blob Storage format + * - stores count of number of blobs present in current weight file + * - stores version (this format currently only supports version=2) + * version=1 in file header is Espresso `blob_v1` format + */ +struct storage_header { + uint32_t count = 0; // Number of blob data. + uint32_t version = 2; // default=2 + + uint64_t reserved_0 = 0; + uint64_t reserved_1 = 0; + uint64_t reserved_2 = 0; + uint64_t reserved_3 = 0; + uint64_t reserved_4 = 0; + uint64_t reserved_5 = 0; + uint64_t reserved_6 = 0; +}; + +// storage_header and blob_metadata are 64 bytes aligned. +// This allows first metadata to be aligned by default +// and data following blob_metadata aligned by default as well. 
+static_assert(sizeof(blob_metadata) == sizeof(uint64_t) * 8, "blob_metadata must be of size 64 bytes"); +static_assert(sizeof(storage_header) == sizeof(uint64_t) * 8, "storage_header must be of size 64 bytes"); + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.cpp new file mode 100644 index 000000000..3f4147035 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.cpp @@ -0,0 +1,309 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Bf16.hpp" +#include "MILBlob/Blob/MMapFileReader.hpp" +#include "MILBlob/Blob/MMapFileReaderFactory.hpp" +#include "MILBlob/Blob/StorageFormat.hpp" +#include "MILBlob/Blob/StorageReader.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Fp8.hpp" +#include "MILBlob/Util/SpanCast.hpp" + +#include +#include +#include + +using namespace MILBlob; +using namespace MILBlob::Blob; + +class StorageReader::Impl final { +public: + Impl(const Impl&) = delete; + Impl(Impl&&) = delete; + Impl& operator=(const Impl&) = delete; + Impl& operator=(Impl&&) = delete; + + explicit Impl(std::string filename) : m_filePath(std::move(filename)) {} + ~Impl() = default; + + const std::string& GetFilename() const + { + return m_filePath; + } + + blob_metadata GetMetadata(uint64_t offset) const + { + EnsureLoaded(); + + blob_metadata metadata = m_reader->ReadStruct(offset); + + // validate sentinel + MILVerifyIsTrue(metadata.sentinel == BlobMetadataSentinel, + std::runtime_error, + "Invalid sentinel in blob_metadata."); + return metadata; + } + + Util::Span GetRawDataView(uint64_t offset) const + { + auto metadata = GetMetadata(offset); + + return 
m_reader->ReadData(metadata.offset, metadata.sizeInBytes); + } + + template + Util::Span GetDataViewForByteAligned(uint64_t offset) const + { + auto metadata = GetAndCheckMetadata(offset, BlobDataTypeTraits::DataType); + + return Util::SpanCast(m_reader->ReadData(metadata.offset, metadata.sizeInBytes)); + } + + template + Util::Span GetDataViewForSubByteSized(uint64_t offset) const + { + auto metadata = GetAndCheckMetadata(offset, BlobDataTypeTraits::DataType); + + Util::Span rawSpan = m_reader->ReadData(metadata.offset, metadata.sizeInBytes); + + MILVerifyIsTrue(metadata.padding_size_in_bits < 8, + std::runtime_error, + "8 or more bits of padding for sub-byte sized data is incorrect"); + + if constexpr (MILBlob::SubByteIsByteAligned()) { + MILVerifyIsTrue(metadata.padding_size_in_bits % T::SizeInBits == 0, + std::runtime_error, + "Invalid padding for byte-aligned sub-byte-sized type"); + } + + // metadata.sizeInBytes includes the padding to make the data byte aligned + + size_t numBits = metadata.sizeInBytes * 8; + numBits -= metadata.padding_size_in_bits; + MILVerifyIsTrue(numBits % T::SizeInBits == 0, std::runtime_error, "Invalid padding for blob"); + size_t numElements = numBits / T::SizeInBits; + + return Util::CastToBitSpan(rawSpan, numElements); + } + + template + Util::Span GetDataView(uint64_t offset) const + { + if constexpr (MILBlob::IsSubByteSized::value) { + return this->GetDataViewForSubByteSized(offset); + } else { + return this->GetDataViewForByteAligned(offset); + } + } + + uint64_t GetDataOffset(uint64_t offset) const + { + auto metadata = GetMetadata(offset); + return metadata.offset; + } + + uint64_t GetDataPaddingInBits(uint64_t offset) const + { + auto metadata = GetMetadata(offset); + return metadata.padding_size_in_bits; + } + + uint64_t GetDataSize(uint64_t metadataOffset) const + { + auto metadata = GetMetadata(metadataOffset); + return metadata.sizeInBytes; + } + + bool IsEncrypted() const + { + EnsureLoaded(); + return 
m_reader->IsEncrypted(); + } + + BlobDataType GetDataType(uint64_t metadataOffset) const + { + auto metadata = GetMetadata(metadataOffset); + return metadata.mil_dtype; + } + + std::vector GetAllOffsets() const + { + EnsureLoaded(); + + const auto& header = m_reader->ReadStruct(0); + auto numBlobs = header.count; + + std::vector allOffsets; + allOffsets.reserve(numBlobs); + // The first metadata offset lies just after the file header. + uint64_t currMetadataOffset = sizeof(storage_header); + for (uint32_t i = 0; i < numBlobs; ++i) { + allOffsets.push_back(currMetadataOffset); + auto metadata = GetMetadata(currMetadataOffset); + // Update offset for next iteration to aligned value. + currMetadataOffset = metadata.offset + metadata.sizeInBytes; + if (currMetadataOffset % DefaultStorageAlignment != 0) { + currMetadataOffset += DefaultStorageAlignment - currMetadataOffset % DefaultStorageAlignment; + } + } + return allOffsets; + } + +private: + void EnsureLoaded() const + { + auto load = [this]() { + auto reader = MakeMMapFileReader(m_filePath); + const auto& header = reader->ReadStruct(0); + MILVerifyIsTrue(header.version == 2, std::runtime_error, "Storage Reader expects file format version 2."); + + // once we're good with the structure of the file, then set class state + m_reader = std::move(reader); + }; + + std::call_once(m_loadedFlag, [&load]() { load(); }); + } + + blob_metadata GetAndCheckMetadata(uint64_t offset, MILBlob::Blob::BlobDataType blobDType) const + { + auto metadata = GetMetadata(offset); + + MILVerifyIsTrue(metadata.mil_dtype == blobDType, + std::runtime_error, + "Metadata data type does not match requested type."); + + return metadata; + } + + const std::string m_filePath; + + mutable std::once_flag m_loadedFlag; + mutable std::unique_ptr m_reader; +}; + +// -------------------------------------------------------------------------------------- + +StorageReader::~StorageReader() = default; + +StorageReader::StorageReader(std::string filename) : 
m_impl(std::make_unique(std::move(filename))) {} + +const std::string& StorageReader::GetFilename() const +{ + return m_impl->GetFilename(); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +// StorageReader::GetDataView specializations for sub byte types +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) \ + template <> \ + Util::Span StorageReader::GetDataView(uint64_t offset) const \ + { \ + return m_impl->GetDataView(offset); \ + } + +#include "MILBlob/SubByteTypeList.hpp" + +#undef DECLARE_SUB_BYTE_TYPE + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +template <> +Util::Span StorageReader::GetDataView(uint64_t offset) const +{ + return m_impl->GetDataView(offset); +} + +Util::Span StorageReader::GetRawDataView(uint64_t offset) const +{ + return m_impl->GetRawDataView(offset); +} + +uint64_t StorageReader::GetDataOffset(uint64_t metadataOffset) const +{ + return 
m_impl->GetDataOffset(metadataOffset); +} + +uint64_t StorageReader::GetDataSize(uint64_t metadataOffset) const +{ + return m_impl->GetDataSize(metadataOffset); +} + +bool StorageReader::IsEncrypted() const +{ + return m_impl->IsEncrypted(); +} + +BlobDataType StorageReader::GetDataType(uint64_t metadataOffset) const +{ + return m_impl->GetDataType(metadataOffset); +} + +std::vector StorageReader::GetAllOffsets() const +{ + return m_impl->GetAllOffsets(); +} + +uint64_t StorageReader::GetDataPaddingInBits(uint64_t metadataOffset) const +{ + return m_impl->GetDataPaddingInBits(metadataOffset); +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.hpp new file mode 100644 index 000000000..bc8c7b687 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageReader.hpp @@ -0,0 +1,137 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Bf16.hpp" +#include "MILBlob/Blob/BlobDataType.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Fp8.hpp" +#include "MILBlob/SubByteTypes.hpp" +#include "MILBlob/Util/Span.hpp" +#include +#include +#include + +namespace MILBlob { +namespace Blob { + +/** + * StorageReader encapsulates memory-mapped reading of the Storage Blob Format. + * + * Memory-mapping is performed lazily on first access to the underlying data. 
+ * + * This file format supports the following types: + * - uint1,2,4 + * - int4 + * - uint8_t + * - Bf16 + * - Fp16 + * - float + * - int16_t + * - uint16_t + * - int32_t + * - uint32_t + */ +class StorageReader final { +public: + StorageReader() = delete; + StorageReader(const StorageReader&) = delete; + StorageReader(StorageReader&&) = delete; + StorageReader& operator=(const StorageReader&) = delete; + StorageReader& operator=(StorageReader&&) = delete; + + StorageReader(std::string filename); + ~StorageReader(); + + const std::string& GetFilename() const; + + /** + * Returns a Span view into the underlying memory-mapped storage. The + * file will be mapped into memory on first access. This is valid for the + * supported types noted above. + * NOTE: `offset` should be the metadata offset. + * @throws std::range_error if offset is not valid. + */ + template + Util::Span GetDataView(uint64_t offset) const; + + /** + * Returns an uint8_t Span view into the underlying memory-mapped storage. The + * file will be mapped into memory on first access. This is valid for the + * supported types noted above. + * NOTE: `offset` should be the metadata offset. + * @throws std::range_error if offset is not valid. + */ + Util::Span GetRawDataView(uint64_t offset) const; + + /** + * Returns file offset of data from given metadata offset + * @throws std::range_error if metadataOffset is not valid. + */ + uint64_t GetDataOffset(uint64_t metadataOffset) const; + + /** + * Returns the size of the data blob for the given metadata offset + * @throws std::range_error if metadataOffset is not valid. + */ + uint64_t GetDataSize(uint64_t metadataOffset) const; + + /** Returns true if the underlying file is encrypted. */ + bool IsEncrypted() const; + + /** + * Returns the storage type of the data blob for the given metadata offset + * @throws std::range_error if metadataOffset is not valid. 
+ */ + BlobDataType GetDataType(uint64_t metadataOffset) const; + + /** Returns a vector containing the metadata offsets for all blobs in the file, in order. */ + std::vector GetAllOffsets() const; + + uint64_t GetDataPaddingInBits(uint64_t metadataOffset) const; + +private: + class Impl; + const std::unique_ptr m_impl; +}; + +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; +template <> +Util::Span StorageReader::GetDataView(uint64_t) const; + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.cpp new file mode 100644 index 000000000..2cc077e9c --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.cpp @@ -0,0 +1,234 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Bf16.hpp" +#include "MILBlob/Blob/FileWriter.hpp" +#include "MILBlob/Blob/StorageFormat.hpp" +#include "MILBlob/Blob/StorageWriter.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Fp8.hpp" +#include "MILBlob/Util/Span.hpp" +#include "MILBlob/Util/SpanCast.hpp" + +#include +#include + +using namespace MILBlob; +using namespace MILBlob::Blob; + +namespace { +template +Util::Span CastAndMakeSpan(T& x) +{ + return Util::Span(reinterpret_cast(&x), sizeof(x)); +} +} // anonymous namespace + +class StorageWriter::Impl final { +public: + Impl(const Impl&) = delete; + Impl(Impl&&) = delete; + Impl& operator=(const Impl&) = delete; + Impl& operator=(Impl&&) = delete; + + ~Impl() = default; + + Impl(const std::string& filePath, bool truncateFile) + : m_filePath(filePath) + , m_fileWriter(std::make_unique(filePath, truncateFile)) + { + if (truncateFile) { + m_fileWriter->WriteData(CastAndMakeSpan(m_header), 0); + } else { + auto fileSize = m_fileWriter->GetFileSize(); + if (fileSize == 0) { + // File exists and is empty + m_fileWriter->WriteData(CastAndMakeSpan(m_header), 0); + } else if (static_cast(fileSize) >= sizeof(m_header)) { + m_fileWriter->ReadData(0, CastAndMakeSpan(m_header)); + if (m_header.version != 2) { + // File exists and header is incorrect + // File is not empty, please use truncate option + throw std::runtime_error( + "[MIL StorageWriter]: Incorrect file header, please use truncateFile=true"); + } + } else { + // File is not empty, please use truncate option + throw std::runtime_error("[MIL StorageWriter]: Incorrect file header, please use truncateFile=true"); + } + } + } + + template + uint64_t WriteData(Util::Span data); + + std::string GetFilePath() const + { + return m_filePath; + } + +private: + std::string m_filePath; + std::unique_ptr m_fileWriter; + 
storage_header m_header; +}; + +template +uint64_t SpanSizeInBytes(Util::Span data) +{ + if constexpr (MILBlob::IsSubByteSized::value) { + auto uint8Span = MILBlob::Util::CastFromBitSpan(data); + return SpanSizeInBytes(uint8Span); + } else { + return data.Size() * sizeof(T); + } +} + +template +void WritePaddingBits(blob_metadata& metadata, size_t numElements) +{ + // types aligned to byte boundaries don't need this padding + if constexpr (MILBlob::IsSubByteSized::value) { + metadata.padding_size_in_bits = 0; + std::size_t numBitsRemaining = (numElements * T::SizeInBits) % 8; + if (numBitsRemaining != 0) { + metadata.padding_size_in_bits = 8 - numBitsRemaining; + } + } +} + +template +uint64_t StorageWriter::Impl::WriteData(Util::Span data) +{ + // 1. Write data + blob_metadata metadata; + metadata.mil_dtype = BlobDataTypeTraits::type>::DataType; + metadata.sizeInBytes = SpanSizeInBytes(data); + + // populate padding_size_in_bits, if we're writing a sub-byte-sized type + WritePaddingBits>(metadata, data.Size()); + + // Get offset for data + auto metadataOffset = m_fileWriter->GetNextAlignedOffset(); + // metadata is 64 bit aligned. 
+ auto dataOffset = metadataOffset + sizeof(metadata); + MILVerifyIsTrue(dataOffset % DefaultStorageAlignment == 0, + std::runtime_error, + "[MIL StorageWriter]: dataOffset is expected to be 64 bits aligned."); + metadata.offset = dataOffset; + // We don't expect m_fileWriter to produce different offset for metadata and data + auto actualMetadataOffset = m_fileWriter->AppendData(CastAndMakeSpan(metadata)); + MILVerifyIsTrue(metadataOffset == actualMetadataOffset, + std::runtime_error, + "[MIL StorageWriter]: Metadata written to different offset than expected."); + Util::Span byteSpan; + if constexpr (MILBlob::IsSubByteSized::value) { + byteSpan = Util::CastFromBitSpan(data); + } else { + byteSpan = Util::SpanCast(data); + } + auto actualDataOffset = m_fileWriter->AppendData(byteSpan); + MILVerifyIsTrue(dataOffset == actualDataOffset, + std::runtime_error, + "[MIL StorageWriter]: Metadata written to different offset than expected."); + + // 2. Update count in header + m_header.count++; + // Write header with new count + m_fileWriter->WriteData(CastAndMakeSpan(m_header), 0); + // return offset in file to blob_metadata + return metadataOffset; +} + +// -------------------------------------------------------------------------------------- + +StorageWriter::~StorageWriter() = default; + +StorageWriter::StorageWriter(const std::string& filePath, bool truncateFile) + : m_impl(std::make_unique(filePath, truncateFile)) +{} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t 
StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +// Implement WriteData forwarding stubs for all sub byte types +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) \ + template <> \ + uint64_t StorageWriter::WriteData(Util::Span data) \ + { \ + return m_impl->WriteData(data); \ + } + +#include "MILBlob/SubByteTypeList.hpp" + +#undef DECLARE_SUB_BYTE_TYPE + +template <> +uint64_t StorageWriter::WriteData(Util::Span data) +{ + return m_impl->WriteData(data); +} + +std::string StorageWriter::GetFilePath() const +{ + return m_impl->GetFilePath(); +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.hpp new file mode 100644 index 000000000..58e3c95ca --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Blob/StorageWriter.hpp @@ -0,0 +1,88 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Bf16.hpp" +#include "MILBlob/Fp16.hpp" +#include "MILBlob/Fp8.hpp" +#include "MILBlob/SubByteTypes.hpp" +#include "MILBlob/Util/Span.hpp" +#include +#include + +namespace MILBlob { +namespace Blob { + +/** + * Utility for writing MIL Blob Storage format + * details of new file format: MIL/Blob/StorageFormat.hpp + */ +class StorageWriter final { +public: + StorageWriter() = delete; + StorageWriter(const StorageWriter&) = delete; + StorageWriter(StorageWriter&&) = delete; + StorageWriter& operator=(const StorageWriter&) = delete; + StorageWriter& operator=(StorageWriter&&) = delete; + + StorageWriter(const std::string& filePath, bool truncateFile = true); + ~StorageWriter(); + + /** + * Writes data to the next available aligned location into opened file stream + * Writes blob_metadata followed by data (both at next aligned offset specified by MILBlob::Blob::DefaultAlignment) + * @throws std::runtime_error if error occurs while writing data to file + */ + template + uint64_t WriteData(Util::Span data); + + /** + * Returns the file path of the blob storage file. 
+ */ + std::string GetFilePath() const; + +private: + class Impl; + const std::unique_ptr m_impl; +}; + +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); +template <> +uint64_t StorageWriter::WriteData(Util::Span); + +} // namespace Blob +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.cpp new file mode 100644 index 000000000..ae1e71a10 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.cpp @@ -0,0 +1,31 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Fp16.hpp" + +// fp16 lib code has some conversion warnings we don't want to globally ignore +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#pragma clang diagnostic ignored "-Wsign-conversion" +#pragma clang diagnostic ignored "-Wconversion" +#include "fp16/fp16.h" +#pragma clang diagnostic pop + +using namespace MILBlob; + +/* static */ Fp16 Fp16::FromFloat(float f) +{ + return Fp16(fp16_ieee_from_fp32_value(f)); +} + +float Fp16::GetFloat() const +{ + return fp16_ieee_to_fp32_value(bytes); +} + +void Fp16::SetFloat(float f) +{ + bytes = fp16_ieee_from_fp32_value(f); +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.hpp new file mode 100644 index 000000000..300e4566f --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp16.hpp @@ -0,0 +1,53 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include +#include + +namespace MILBlob { + +/** + * Struct for holding bytes that represent a fp16 number. 
+ * Floating point interface treats "bytes" as IEEE 754 half precision floating point + * (https://ieeexplore.ieee.org/document/8766229) + */ +struct Fp16 { + explicit Fp16(uint16_t bs) : bytes(bs) {} + Fp16() : bytes(0) {} + + static Fp16 FromFloat(float f); + + float GetFloat() const; + void SetFloat(float f); + + // NOLINTNEXTLINE(misc-non-private-member-variables-in-classes) + uint16_t bytes; +}; + +inline bool operator==(const Fp16& first, const Fp16& second) noexcept +{ + return first.bytes == second.bytes; +} + +inline bool operator!=(const Fp16& first, const Fp16& second) noexcept +{ + return first.bytes != second.bytes; +} + +} // namespace MILBlob + +namespace std { + +template <> +struct hash { + size_t operator()(const MILBlob::Fp16& fp) const + { + return fp.bytes; + } +}; + +} // namespace std diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.cpp new file mode 100644 index 000000000..2176fad97 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.cpp @@ -0,0 +1,189 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Fp8.hpp" + +#include +#include + +using namespace MILBlob; + +// Some global constants. +constexpr uint8_t fp32MantissaBits = 23; +constexpr int8_t fp32ExponentBias = 127; + +// Helper function to handle Fp32 -> Fp8 exponent and mantissa. +template +void HandleFp32ToFp8ExponentMantissa(FP8_CAST& fp8, FloatCast& fp32) +{ + int32_t unbiasedExponent = fp32.components.exponent - fp32ExponentBias; + if (unbiasedExponent + FP8_TYPE::fp8ExponentBias > 0) { + // Normal. + fp8.components.exponent = uint8_t(fp32.components.exponent - fp32ExponentBias + FP8_TYPE::fp8ExponentBias); + } else { + // Denormal. 
+ FloatCast fp32_bias; + fp32_bias.components.sign = fp32.components.sign; + fp32_bias.components.exponent = -1 * FP8_TYPE::fp8ExponentBias + fp32ExponentBias + 1; + fp32_bias.components.mantissa = 0; + fp32.f += fp32_bias.f; + fp8.components.exponent = 0; + } + if ((fp32.components.mantissa & ((0x1 << (fp32MantissaBits - FP8_TYPE::fp8MantissaBits)) - 1)) != 0) { + throw std::range_error("FP8 SetFloat requires rounding for the given value."); + } + fp8.components.mantissa = fp32.components.mantissa >> (fp32MantissaBits - FP8_TYPE::fp8MantissaBits); +} + +// Helper function to handle normalizing the denormalized case for fp8. +// For denormalized fp8's, we need to normalize by subtracting a bias of 2^(1 - fp8ExponentBias) +template +void HandleFp8ToFp32Denormalize(FP8_CAST& fp8, FloatCast& fp32) +{ + if (fp8.components.exponent == 0 && fp8.components.mantissa != 0) { + fp32.components.exponent++; + FloatCast fp32_bias; + fp32_bias.components.sign = fp8.components.sign; + fp32_bias.components.exponent = fp32.components.exponent; + fp32_bias.components.mantissa = 0; + fp32.f -= fp32_bias.f; + } +} + +// Helper function to handle exponent and mantissa for Fp8 -> Fp32 conversion. +template +void HandleFp8ToFp32ExponentMantissa(const FP8_CAST& fp8, FloatCast& fp32) +{ + if (fp8.components.exponent == 0 && fp8.components.mantissa == 0) { + fp32.components.exponent = 0; + fp32.components.mantissa = 0; + return; + } + int32_t unbiasedExponent = fp8.components.exponent - FP8_TYPE::fp8ExponentBias; + fp32.components.exponent = uint32_t(unbiasedExponent + fp32ExponentBias); + fp32.components.mantissa = + uint32_t(int32_t(fp8.components.mantissa << (fp32MantissaBits - FP8_TYPE::fp8MantissaBits))); +} + +float Fp8E5M2::GetFloat() const +{ + FloatCast fp32 = {.f = 0}; + // Set the sign bit. + fp32.components.sign = data.components.sign; + + // Standard NaN/Inf case. We just use the fp8 mantissa as there's + // no strong requirements for mantissa in the NaN case. 
+ if (data.components.exponent == (0x1 << fp8ExponentBits) - 1) { + fp32.components.exponent = 0xFF; + fp32.components.mantissa = data.components.mantissa; + return fp32.f; + } + HandleFp8ToFp32ExponentMantissa(data, fp32); + HandleFp8ToFp32Denormalize(data, fp32); + return fp32.f; +} + +float Fp8E4M3FN::GetFloat() const +{ + FloatCast fp32 = {.f = 0}; + // Set the sign bit. + fp32.components.sign = data.components.sign; + // NaN case, infinity is not supported. We just use the mantissa from the fp8. + if (data.components.exponent == (0x1 << fp8ExponentBits) - 1 && data.components.mantissa == 0x7) { + fp32.components.exponent = 0xFF; + fp32.components.mantissa = data.components.mantissa; + return fp32.f; + } + HandleFp8ToFp32ExponentMantissa(data, fp32); + HandleFp8ToFp32Denormalize(data, fp32); + return fp32.f; +} + +void Fp8E5M2::SetFloat(float f) +{ + FloatCast fp32 = {.f = f}; + data = {.byte = 0}; + // Set sign bit. + data.components.sign = fp32.components.sign; + + // If f is nan or inf, set exponent to all 1's. + if (std::isnan(f)) { + data.components.exponent = (0x1 << fp8ExponentBits) - 1; + data.components.mantissa = 1; + } else if (std::isinf(f)) { + data.components.exponent = (0x1 << fp8ExponentBits) - 1; + data.components.mantissa = 0; + } else if (f == 0) { + data.components.exponent = 0; + data.components.mantissa = 0; + } else { + int32_t unbiasedExponent = fp32.components.exponent - fp32ExponentBias; + // Float is normal or denormal, check the exponent and set it. + // For now, we throw on over/underflows. There are alternative ways to handle + // this (round to zero). 
+ if (unbiasedExponent > fp8ExponentBias) { + throw std::range_error("Fp8E5M2 SetFloat exponent overflow."); + } else if (unbiasedExponent < (-1 * fp8ExponentBias - int32_t(fp8MantissaBits) + 1)) { + throw std::range_error("Fp8E5M2 SetFloat exponent underflow."); + } + HandleFp32ToFp8ExponentMantissa(data, fp32); + } +} + +void Fp8E4M3FN::SetFloat(float f) +{ + FloatCast fp32 = {.f = f}; + data = {.byte = 0}; + // Set sign bit. + data.components.sign = fp32.components.sign; + + // If f is nan or inf, set exponent to all 1's. + if (std::isnan(f)) { + data.components.exponent = (0x1 << fp8ExponentBits) - 1; + data.components.mantissa = 7; + } else if (std::isinf(f)) { + throw std::range_error("Fp8E4M3FN SetFloat infinity not supported."); + } else if (f == 0) { + data.components.exponent = 0; + data.components.mantissa = 0; + } else { + int32_t unbiasedExponent = fp32.components.exponent - fp32ExponentBias; + // Float is normal or denormal, check the exponent and set it. + // For now, we throw on over/underflows. There are alternative ways to handle + // this (round to zero). + if (unbiasedExponent > fp8ExponentBias + 1) { + throw std::range_error("Fp8E4M3FN SetFloat exponent overflow."); + } else if (unbiasedExponent < (-1 * fp8ExponentBias - int32_t(fp8MantissaBits) + 1)) { + // Underflow occurs when the exponent is below the minimum denormal value. 
+ // This means unbiased exponent is less than -fp8ExponentBias - fp8MantissaBits + 1 + throw std::range_error("Fp8E4M3FN SetFloat exponent underflow."); + } + HandleFp32ToFp8ExponentMantissa(data, fp32); + } +} + +Fp8E5M2 Fp8E5M2::FromFloat(float f) +{ + Fp8E5M2 result; + result.SetFloat(f); + return result; +} + +Fp8E4M3FN Fp8E4M3FN::FromFloat(float f) +{ + Fp8E4M3FN result; + result.SetFloat(f); + return result; +} + +bool Fp8E5M2::IsNaN() const +{ + return (data.components.exponent == 0x1F && data.components.mantissa != 0); +} + +bool Fp8E4M3FN::IsNaN() const +{ + return (data.components.exponent == 0xF && data.components.mantissa == 7); +} diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.hpp new file mode 100644 index 000000000..1a99e9e69 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Fp8.hpp @@ -0,0 +1,107 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include +#include + +namespace MILBlob { + +// General helper typedef to help process an FP32 in different forms/its +// constituent components. +typedef union { + float f; + uint32_t bytes; + struct { + uint32_t mantissa : 23; + uint32_t exponent : 8; + uint32_t sign : 1; + } components; +} FloatCast; + +// Macro for FP8 types. 
+#define DECLARE_FP8_TYPE(NAME, EXPONENT_BITS, MANTISSA_BITS, EXPONENT_BIAS) \ + struct NAME { \ + typedef union { \ + uint8_t byte; \ + struct { \ + uint8_t mantissa : MANTISSA_BITS; \ + uint8_t exponent : EXPONENT_BITS; \ + uint8_t sign : 1; \ + } components; \ + } Cast; \ + explicit NAME(uint8_t d) \ + { \ + data.byte = d; \ + }; \ + NAME() \ + { \ + data.byte = 0; \ + } \ + static NAME FromFloat(float f); \ + float GetFloat() const; \ + void SetFloat(float f); \ + uint8_t GetByte() const \ + { \ + return data.byte; \ + } \ + void SetByte(uint8_t byte) \ + { \ + data.byte = byte; \ + } \ + bool IsNaN() const; \ + Cast data; \ + static constexpr int8_t fp8ExponentBias = EXPONENT_BIAS; \ + static constexpr uint8_t fp8ExponentBits = EXPONENT_BITS; \ + static constexpr uint8_t fp8MantissaBits = MANTISSA_BITS; \ + static_assert(fp8ExponentBits + fp8MantissaBits == 7, "Number of exponent and mantissa bits should be 7"); \ + }; \ + inline bool operator==(const NAME& first, const NAME& second) noexcept \ + { \ + if ((first.data.byte & 0x7F) == 0 && (second.data.byte & 0x7F) == 0) { \ + return true; \ + } \ + if (first.IsNaN() && second.IsNaN()) { \ + return false; \ + } \ + return first.data.byte == second.data.byte; \ + } \ + inline bool operator!=(const NAME& first, const NAME& second) noexcept \ + { \ + if ((first.data.byte & 0x7F) == 0 && (second.data.byte & 0x7F) == 0) { \ + return false; \ + } \ + if (first.IsNaN() && second.IsNaN()) { \ + return true; \ + } \ + return first.data.byte != second.data.byte; \ + } + +// Define the types. 
+DECLARE_FP8_TYPE(Fp8E5M2, 5, 2, 15) +DECLARE_FP8_TYPE(Fp8E4M3FN, 4, 3, 7) + +} // namespace MILBlob + +namespace std { + +template <> +struct hash { + size_t operator()(const MILBlob::Fp8E5M2& fp) const + { + return fp.data.byte; + } +}; + +template <> +struct hash { + size_t operator()(const MILBlob::Fp8E4M3FN& fp) const + { + return fp.data.byte; + } +}; + +} // namespace std diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypeList.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypeList.hpp new file mode 100644 index 000000000..295313c33 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypeList.hpp @@ -0,0 +1,13 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +// Listing of sub-byte-sized types in MIL +// Template file used for generating stub functionality +DECLARE_SUB_BYTE_TYPE(Int4) +DECLARE_SUB_BYTE_TYPE(UInt6) +DECLARE_SUB_BYTE_TYPE(UInt4) +DECLARE_SUB_BYTE_TYPE(UInt3) +DECLARE_SUB_BYTE_TYPE(UInt2) +DECLARE_SUB_BYTE_TYPE(UInt1) diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.cpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.cpp new file mode 100644 index 000000000..e2611bd6e --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.cpp @@ -0,0 +1,209 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#include "MILBlob/Util/Verify.hpp" + +#include "MILBlob/SubByteTypes.hpp" +#include "MILBlob/Util/SubByteConversionUtils.hpp" +#include +#include + +namespace MILBlob { + +struct IndexAndOffset { + uint64_t index; + uint8_t offset; +}; + +static IndexAndOffset GetIndexAndOffsetForSubByteValue(uint64_t i, uint8_t numBits) +{ + IndexAndOffset ret; + + uint64_t startBit = numBits * i; + + ret.index = startBit / 8; + ret.offset = startBit % 8; + + return ret; +} + +template +std::vector PackSubByteVecForNonByteAligned(Util::Span span) +{ + std::vector ret(MILBlob::SizeInBytes(span.Size()), 0); + + for (uint64_t i = 0; i < span.Size(); i++) { + MILVerifyIsTrue(span[i] <= T::MAX && span[i] >= T::MIN, + std::range_error, + "Value " + std::to_string(span[i]) + " is outside allowed subbyte datatype range [" + + std::to_string(T::MIN) + ", " + std::to_string(T::MAX) + "]."); + + auto indexAndOffset = GetIndexAndOffsetForSubByteValue(i, T::SizeInBits); + auto idx = indexAndOffset.index; + auto offset = indexAndOffset.offset; + + ret[idx] |= ((uint8_t)(span[i] << offset)); + if (offset > 8 - T::SizeInBits) { + // part of the i'th element of span spills over to idx+1 + // uint8_t rshift = T::SizeInBits - (8 - offset); + uint8_t rshift = 8 - offset; + ret[idx + 1] |= ((uint8_t)span[i] >> rshift); + } + } + + return ret; +} + +template +std::vector PackSubByteVecImpl(Util::Span vec) +{ + if constexpr (!MILBlob::SubByteIsByteAligned()) { + return PackSubByteVecForNonByteAligned(vec); + } + const auto ElementsPerByte = 8 / T::SizeInBits; + std::vector ret(MILBlob::SizeInBytes(vec.Size())); + for (size_t i = 0; i < vec.Size(); i++) { + size_t shiftAmmount = T::SizeInBits * (i % ElementsPerByte); + MILVerifyIsTrue(vec[i] <= T::MAX && vec[i] >= T::MIN, + std::range_error, + "Value " + std::to_string(vec[i]) + " is 
outside allowed subbyte datatype range [" + + std::to_string(T::MIN) + ", " + std::to_string(T::MAX) + "]."); + ret[i / ElementsPerByte] |= (static_cast((vec[i] & T::BitMask) << shiftAmmount)); + } + return ret; +} + +#define DEFINE_PACK_SUB_BYTE_VEC(TYPE) \ + std::vector PackSubByteVec(const std::vector& vec) \ + { \ + using impl_t = decltype(TYPE::data); \ + Util::Span int8Span(reinterpret_cast(vec.data()), vec.size()); \ + return PackSubByteVecImpl(int8Span); \ + } + +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) DEFINE_PACK_SUB_BYTE_VEC(TYPE_NAME) +#include "MILBlob/SubByteTypeList.hpp" +#undef DECLARE_SUB_BYTE_TYPE + +#define DEFINE_UNPACK_SUB_BYTE_VEC(TYPE) \ + template <> \ + std::vector UnPackSubByteVec(const std::vector& vec, size_t numElements) \ + { \ + return UnPackSubByteVecImpl(vec, numElements); \ + } + +template +std::vector UnPackSubByteVecImpl(const std::vector& vec, size_t numElements) +{ + std::vector ret(numElements); + MILVerifyIsTrue( + vec.size() == MILBlob::SizeInBytes(numElements), + std::invalid_argument, + "Unpacking to sub-byte type vector has invalid number of elements. 
Sub-byte vector with NumElements " + "requires exactly vec.size() bytes."); + Util::Span subByteSpan((typename MILBlob::Util::voidType::type)(vec.data()), numElements); + for (size_t i = 0; i < numElements; i++) { + ret[i] = subByteSpan.ValueAt(i); + } + return ret; +} + +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) DEFINE_UNPACK_SUB_BYTE_VEC(TYPE_NAME) +#include "MILBlob/SubByteTypeList.hpp" +#undef DECLARE_SUB_BYTE_TYPE + +template <> +std::vector PackInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues) +{ + return PackSubByteVecImpl(unpackedValues); +} + +// Class methods for Int4, UInt4, etc. 
+#define IMPLEMENT_METHODS_FOR_SUB_BYTE_TYPE(TYPE_NAME) \ + TYPE_NAME::TYPE_NAME(decltype(TYPE_NAME::data) d) \ + { \ + MILVerifyIsTrue(d <= TYPE_NAME::MAX && d >= TYPE_NAME::MIN, \ + std::range_error, \ + #TYPE_NAME " value is out of range."); \ + data = d; \ + } \ + /* static */ TYPE_NAME TYPE_NAME::FromInt(int i) \ + { \ + TYPE_NAME result; \ + result.SetInt(i); \ + return result; \ + } \ + int TYPE_NAME::GetInt() const \ + { \ + return static_cast(data); \ + } \ + void TYPE_NAME::SetInt(int i) \ + { \ + MILVerifyIsTrue(i <= TYPE_NAME::MAX && i >= TYPE_NAME::MIN, \ + std::range_error, \ + #TYPE_NAME " value is out of range."); \ + data = static_cast(i); \ + return; \ + } \ + bool operator==(const TYPE_NAME& first, const TYPE_NAME& second) noexcept \ + { \ + return first.data == second.data; \ + } \ + bool operator!=(const TYPE_NAME& first, const TYPE_NAME& second) noexcept \ + { \ + return first.data != second.data; \ + } \ + static_assert(sizeof(TYPE_NAME) == 1, #TYPE_NAME " struct must be of size 1 byte"); + +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) IMPLEMENT_METHODS_FOR_SUB_BYTE_TYPE(TYPE_NAME) +#include "MILBlob/SubByteTypeList.hpp" +#undef DECLARE_SUB_BYTE_TYPE + +}; // namespace MILBlob + +namespace std { + +// +128 here so that casting i.data to size_t, for T==Int4, is safe +#define DEFINE_HASH_FOR_SUB_BYTE_TYPE(TYPE) \ + size_t hash::operator()(const MILBlob::TYPE& i) const \ + { \ + return static_cast(i.data + 128); \ + } + +#define DECLARE_SUB_BYTE_TYPE(TYPE_NAME) DEFINE_HASH_FOR_SUB_BYTE_TYPE(TYPE_NAME) +#include "MILBlob/SubByteTypeList.hpp" +#undef DECLARE_SUB_BYTE_TYPE + +} // namespace std diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.hpp new file mode 100644 index 000000000..96be5e7a5 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/SubByteTypes.hpp @@ -0,0 +1,134 @@ +// Copyright (c) 2021, Apple Inc. 
All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include +#include +#include +#include + +// A sub-byte type of is represented in MIL by a byte-sized struct which wraps +// an value of type IMPL_TYPE +#define DEFINE_SUB_BYTE_TYPE(NAME, IMPL_TYPE, BIT_SIZE, MASK, MAX_VAL, MIN_VAL) \ + struct NAME { \ + explicit NAME(IMPL_TYPE d); \ + NAME() : data(0) {} \ + static NAME FromInt(int i); \ + int GetInt() const; \ + void SetInt(int i); \ + IMPL_TYPE data; \ + static constexpr uint8_t SizeInBits = BIT_SIZE; \ + static constexpr uint8_t BitMask = MASK; \ + static constexpr IMPL_TYPE MAX = MAX_VAL; \ + static constexpr IMPL_TYPE MIN = MIN_VAL; \ + static_assert(MAX >= MIN, "Incompatible values for MIN and MAX"); \ + }; + +// Declares the following exports for sub-byte-type NAME +// operator == +// operator != +// +// Packs a sub byte vector into uint8_t representation since a vector of sub byte type +// cannot be packed. +// std::vector PackSubByteVec(const std::vector& vec); +// +// Unpacks a sub byte vector in uint8_t representation to a vector of the sub byte type. 
+// template <> +// std::vector UnPackSubByteVec(const std::vector& vec, size_t numElements); +#define DECLARE_SUB_BYTE_TYPE_METHODS(NAME) \ + bool operator==(const NAME& first, const NAME& second) noexcept; \ + bool operator!=(const NAME& first, const NAME& second) noexcept; \ + std::vector PackSubByteVec(const std::vector& vec); \ + template <> \ + std::vector UnPackSubByteVec(const std::vector& vec, size_t numElements); + +namespace MILBlob { + +template +class IsSubByteSized { + struct S { + char a; + char b; + }; + template + static char Tester(decltype(&U::SizeInBits)); + template + static S Tester(...); + +public: + enum { + value = sizeof(Tester(0)) == sizeof(char) + }; +}; + +template +constexpr bool SubByteIsByteAligned() +{ + return (8 / T::SizeInBits) * T::SizeInBits == 8; +} + +template +constexpr std::size_t SizeInBytes(std::size_t numElements) +{ + return (std::size_t)std::ceil((numElements * T::SizeInBits) / 8.0); +} + +template +std::vector UnPackSubByteVec(const std::vector& vec, std::size_t numElements); + +DEFINE_SUB_BYTE_TYPE(Int4, int8_t, 4, 0xF, 7, -8) +DECLARE_SUB_BYTE_TYPE_METHODS(Int4) + +DEFINE_SUB_BYTE_TYPE(UInt6, uint8_t, 6, 0b111111, 63, 0) +DECLARE_SUB_BYTE_TYPE_METHODS(UInt6) + +DEFINE_SUB_BYTE_TYPE(UInt4, uint8_t, 4, 0xF, 15, 0) +DECLARE_SUB_BYTE_TYPE_METHODS(UInt4) + +DEFINE_SUB_BYTE_TYPE(UInt3, uint8_t, 3, 0b111, 7, 0) +DECLARE_SUB_BYTE_TYPE_METHODS(UInt3) + +DEFINE_SUB_BYTE_TYPE(UInt2, uint8_t, 2, 0b11, 3, 0) +DECLARE_SUB_BYTE_TYPE_METHODS(UInt2) + +DEFINE_SUB_BYTE_TYPE(UInt1, uint8_t, 1, 0b1, 1, 0) +DECLARE_SUB_BYTE_TYPE_METHODS(UInt1) + +} // namespace MILBlob + +namespace std { + +template <> +struct hash { + size_t operator()(const MILBlob::Int4& i) const; +}; + +template <> +struct hash { + size_t operator()(const MILBlob::UInt6& i) const; +}; + +template <> +struct hash { + size_t operator()(const MILBlob::UInt4& i) const; +}; + +template <> +struct hash { + size_t operator()(const MILBlob::UInt3& i) const; +}; + +template 
<> +struct hash { + size_t operator()(const MILBlob::UInt2& i) const; +}; + +template <> +struct hash { + size_t operator()(const MILBlob::UInt1& i) const; +}; + +} // namespace std diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Span.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Span.hpp new file mode 100644 index 000000000..9ce9a8596 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Span.hpp @@ -0,0 +1,674 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/SubByteTypes.hpp" +#include "MILBlob/Util/Verify.hpp" +#include +#include +#include +#include +#include +#include + +namespace MILBlob { +namespace Util { + +constexpr std::size_t DynamicExtent = std::numeric_limits::max(); + +namespace span_helpers { + +//---------------------------------------------------------------------- +// helper traits +//---------------------------------------------------------------------- + +template +struct IsDynamicExtent { + static constexpr bool value = false; +}; + +template <> +struct IsDynamicExtent { + static constexpr bool value = true; +}; + +template +struct IsIndexValid { + static constexpr bool value = (Index < Extent); +}; + +template +struct IsIndexValid { + static constexpr bool value = false; +}; + +//---------------------------------------------------------------------- +// helper storage size +//---------------------------------------------------------------------- + +template +class SpanSize final { +public: + SpanSize() = default; + ~SpanSize() = default; + SpanSize(const SpanSize&) = default; + SpanSize(SpanSize&&) noexcept = default; + SpanSize& operator=(const SpanSize&) = default; + SpanSize& operator=(SpanSize&&) noexcept = default; + + constexpr size_t Size() const + { + return 
m_size; + } + +private: + static constexpr size_t m_size = Extent; +}; + +template <> +class SpanSize final { +public: + SpanSize() = delete; + ~SpanSize() = default; + SpanSize(const SpanSize&) = default; + SpanSize(SpanSize&&) noexcept = default; + SpanSize& operator=(const SpanSize&) = default; + SpanSize& operator=(SpanSize&&) noexcept = default; + + explicit SpanSize(size_t size) : m_size(size) {} + + size_t Size() const + { + return m_size; + } + +private: + size_t m_size; +}; + +} // namespace span_helpers + +//---------------------------------------------------------------------- +// Span is a custom implementation of an array view, similar +// to std::span introduced in C++20. +// +// If Extent is specified, Span supports compile-time bounds checking +// when the Get<> method is used. +// +// For underlying types of at least byte-size, this version of Span also +// supports iterating slices and dimensions of multi-dimensional +// contiguous memory blocks. +// +// For sub-byte types, only basic access to the data pointer and size +// are supported. +//---------------------------------------------------------------------- + +// Span types of at least byte-size. 
+template +class Span final { +public: + using value_type = T; + using pointer = typename std::add_pointer::type; + using reference = typename std::add_lvalue_reference::type; + using iterator = pointer; + + using const_value_type = typename std::add_const::type; + using const_pointer = typename std::add_pointer::type; + using const_iterator = const_pointer; + + template + using SpanSize = span_helpers::SpanSize; + + template + using IsDynamicExtent = span_helpers::IsDynamicExtent; + + template + using IsIndexValid = span_helpers::IsIndexValid; + + static_assert(!MILBlob::IsSubByteSized::value, "Sub byte-sized types must use the reduced Span implementation"); + + class SliceIterator final { + public: + SliceIterator(pointer p, size_t stride) : m_ptr(p), m_stride(stride) {} + + bool operator==(const SliceIterator& other) const + { + return m_ptr == other.m_ptr && m_stride == other.m_stride; + } + + bool operator!=(const SliceIterator& other) const + { + return !(*this == other); + } + + SliceIterator& operator++() + { + m_ptr += m_stride; + return *this; + } + + // NOLINTNEXTLINE(cert-dcl21-cpp) + SliceIterator operator++(int) const + { + return SliceIterator(m_ptr + m_stride, m_stride); + } + + Span operator*() const + { + return Span(m_ptr, m_stride); + } + + private: + pointer m_ptr; + size_t m_stride; + }; + + template + class StaticSliceIterator final { + public: + explicit StaticSliceIterator(pointer p) : m_ptr(p) {} + + bool operator==(const StaticSliceIterator& other) const + { + return m_ptr == other.m_ptr; + } + + bool operator!=(const StaticSliceIterator& other) const + { + return !(*this == other); + } + + StaticSliceIterator& operator++() + { + m_ptr += Stride; + return *this; + } + + // NOLINTNEXTLINE(cert-dcl21-cpp) + StaticSliceIterator operator++(int) const + { + return StaticSliceIterator(m_ptr + Stride); + } + + Span operator*() const + { + return Span(m_ptr); + } + + private: + pointer m_ptr; + }; + + template + class IteratorProvider final { + 
public: + IteratorProvider(Iterator begin, Iterator end) : m_begin(begin), m_end(end) {} + + Iterator begin() const + { + return m_begin; + } + + Iterator end() const + { + return m_end; + } + + private: + Iterator m_begin; + Iterator m_end; + }; + + ~Span() = default; + + Span(const Span&) = default; + Span(Span&&) noexcept = default; + + Span& operator=(const Span&) = default; + Span& operator=(Span&&) noexcept = default; + + /** Implicit copy constructor for converting a mutable span to a const span. Extent and type must be the same. */ + template ::value && + std::is_same::type>::value, + int>::type = 0> + Span(const Span& other) : m_ptr(other.Data()) + , m_size(other.Size()) + {} + + /** Implicit move constructor for converting a mutable span to a const span. Extent and type must be the same. */ + template ::value && + std::is_same::type>::value, + int>::type = 0> + Span(Span&& other) : m_ptr(other.Data()) + , m_size(other.Size()) + {} + + template ::value, int>::type = 0> + Span() : m_ptr(nullptr) + , m_size(0) + {} + + template ::value, int>::type = 0> + explicit Span(pointer p) : m_ptr(p) + {} + + template ::value, int>::type = 0> + Span(pointer p, size_t size) : m_ptr(size == 0 ? nullptr : p) + , m_size(size) + {} + + // + // properties + // + + pointer Data() const + { + return m_ptr; + } + + size_t Size() const + { + return m_size.Size(); + } + + constexpr bool IsEmpty() const + { + return Size() == 0; + } + + // + // random access + // + + reference operator[](size_t index) const + { + MILDebugVerifyIsTrue(index < Size(), std::range_error, "index out of bounds"); + return m_ptr[index]; + } + + reference At(size_t index) const + { + MILVerifyIsTrue(index < Size(), std::range_error, "index out of bounds"); + return m_ptr[index]; + } + + // Get() returns a reference to the value at index N. + // This method only exists for fixed-sized Span instantiations. + // The bounds of N are compile-time checked. 
+ template < + size_t Index, + typename std::enable_if::value && IsIndexValid::value, int>::type = 0> + reference Get() const + { + return (*this)[Index]; + } + + // + // slicing + // + + /** Gets a sub-span starting at index */ + Span Slice(size_t index) const + { + MILVerifyIsTrue(index < Size(), std::range_error, "index out of bounds"); + return Span(Data() + index, Size() - index); + } + + /** Gets a sub-span starting at index with length size */ + Span Slice(size_t index, size_t size) const + { + MILVerifyIsTrue(size > 0 && index < Size() && index + size <= Size(), std::range_error, "index out of bounds"); + return Span(Data() + index, size); + } + + /** Slices into num_slices dimensions, and returns the span corresponding to slice_index */ + Span SliceByDimension(size_t num_slices, size_t slice_index) const + { + MILVerifyIsTrue(Size() % num_slices == 0, std::range_error, "index out of bounds"); + size_t stride = Size() / num_slices; + return Slice(slice_index * stride, stride); + } + + // + // reinterpreting data + // + + template + Span StaticResize() const + { + MILVerifyIsTrue(NewExtent <= Size(), std::range_error, "index out of bounds"); + return Span(Data()); + } + + // + // basic C++ iterators + // + + iterator begin() const + { + return Data(); + } + + iterator end() const + { + return Data() + Size(); + } + + const_iterator cbegin() const + { + return Data(); + } + + const_iterator cend() const + { + return Data() + Size(); + } + + std::reverse_iterator rbegin() const + { + return std::reverse_iterator(Data() + Size()); + } + + std::reverse_iterator rend() const + { + return std::reverse_iterator(Data()); + } + + std::reverse_iterator crbegin() const + { + return std::reverse_iterator(Data() + Size()); + } + + std::reverse_iterator crend() const + { + return std::reverse_iterator(Data()); + } + + // + // complex C++ iterators + // + + /** Iterates based on slices. This iterator will produce Size() % sliceSice slices. 
*/ + IteratorProvider IterateSlices(size_t sliceSize) const + { + MILVerifyIsTrue(Size() % sliceSize == 0, std::range_error, "index out of bounds"); + + return IteratorProvider(SliceIterator(Data(), sliceSize), + SliceIterator(Data() + Size(), sliceSize)); + } + + template + IteratorProvider> IterateSlices() const + { + MILVerifyIsTrue(Size() % SliceSize == 0, std::range_error, "index out of bounds"); + + return IteratorProvider>(StaticSliceIterator(Data()), + StaticSliceIterator(Data() + Size())); + } + + /** + Iterates based on dimensions. Similar to IterateBySlices, but based on the number of slices (dimensions) rather + than the size of the slice. + */ + IteratorProvider IterateByDimension(size_t dim) const + { + return IterateSlices(Size() / dim); + } + +private: + pointer m_ptr; + SpanSize m_size; +}; + +template +struct voidType { + using type = void*; +}; +template +struct voidType::value>::type> { + using type = const void*; +}; +// Specializations for sub-byte types. +// This should ideally be implemented with std::enable_if but that involves an ABI breaking change. +// The pointer referenced by m_ptr and returned by Data() is byte aligned and packed, with possible +// padding in the last byte. +#define DEFINE_SPAN_CLASS_FOR_SUBBYTE(subByteType) \ +public: \ + template \ + using SpanSize = span_helpers::SpanSize; \ + \ + template \ + using IsDynamicExtent = span_helpers::IsDynamicExtent; \ + \ + ~Span() = default; \ + \ + Span(const Span&) = default; \ + Span(Span&&) noexcept = default; \ + \ + Span& operator=(const Span&) = default; \ + Span& operator=(Span&&) noexcept = default; \ + \ + /** Implicit copy constructor for converting a mutable span to a const span. Extent and type must be the same. */ \ + template ::value && \ + std::is_same::type>::value, \ + int>::type = 0> \ + Span(const Span& other) : m_ptr(other.Data()) \ + , m_size(other.Size()) \ + {} \ + \ + /** Implicit move constructor for converting a mutable span to a const span. 
Extent and type must be the same. */ \ + template ::value && \ + std::is_same::type>::value, \ + int>::type = 0> \ + Span(Span&& other) : m_ptr(other.Data()) \ + , m_size(other.Size()) \ + {} \ + \ + template ::value, int>::type = 0> \ + Span() : m_ptr(nullptr) \ + , m_size(0) \ + {} \ + \ + template ::value, int>::type = 0> \ + explicit Span(voidType::type p) : m_ptr(p) \ + {} \ + \ + template ::value, int>::type = 0> \ + Span(voidType::type p, size_t size) : m_ptr(size == 0 ? nullptr : p) \ + , m_size(size) \ + {} \ + \ + voidType::type Data() const \ + { \ + return m_ptr; \ + } \ + \ + size_t Size() const \ + { \ + return m_size.Size(); \ + } \ + \ + constexpr bool IsEmpty() const \ + { \ + return Size() == 0; \ + } \ + template \ + Span StaticResize() const \ + { \ + MILVerifyIsTrue(NewExtent <= Size(), std::range_error, "index out of bounds"); \ + return Span(Data()); \ + } \ + \ + std::remove_const::type ValueAt(std::size_t index) \ + { \ + if (index >= Size()) { \ + throw std::out_of_range("index out of bounds."); \ + } \ + using nonConstSubByteType = std::remove_const::type; \ + using impl_t = decltype(nonConstSubByteType::data); \ + \ + uint8_t bitSize = nonConstSubByteType::SizeInBits; \ + size_t elementIndex = index % Size(); \ + size_t packedBitsIndex = elementIndex * bitSize / 8; \ + size_t startBitIndex = elementIndex * bitSize % 8; \ + uint8_t bitMask = static_cast(nonConstSubByteType::BitMask << startBitIndex); \ + uint8_t restoredElement_uint8 = (*((const uint8_t*)Data() + packedBitsIndex) & bitMask) >> startBitIndex; \ + \ + /* For non-byte-aligned dtypes like UInt3, the required bits can be spread across 2 bytes. \ + Create mask and retrieve bits from the second byte if needed. 
\ + Look at SpanTests::testSubByteUIntValueAt*/ \ + size_t retrievedBits = 8 - startBitIndex; \ + if (retrievedBits < bitSize) { \ + bitMask = 0; \ + for (size_t i = 0; i < (bitSize - retrievedBits); ++i) { \ + bitMask |= 1 << i; \ + } \ + restoredElement_uint8 |= (*((const uint8_t*)Data() + packedBitsIndex + 1) & bitMask) << retrievedBits; \ + } \ + \ + /* If sign=1, fill all 1s in the prefix. \ + e.g., say the Int4 value is 1011 which is -5 in 2s complement. At this point, restoredElement_uint8 is \ + 00001011. To represent -5 correctly in 1 byte, we fill prefix 1s, resulting in 111110111. */ \ + if (nonConstSubByteType::MIN < 0) { \ + uint8_t sign_bit = (restoredElement_uint8 >> (bitSize - 1)) & 1; \ + if (sign_bit == 1) { \ + for (size_t i = 0; i < 8 - bitSize; ++i) { \ + restoredElement_uint8 |= 1 << (i + bitSize); \ + } \ + } \ + } \ + return nonConstSubByteType(*reinterpret_cast(&restoredElement_uint8)); \ + } \ + \ +private: \ + voidType::type m_ptr; \ + SpanSize m_size; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(Int4) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const Int4) +}; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(UInt6) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const UInt6) +}; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(UInt4) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const UInt4) +}; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(UInt3) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const UInt3) +}; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(UInt2) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const UInt2) +}; + +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(UInt1) +}; +template +class Span final { + DEFINE_SPAN_CLASS_FOR_SUBBYTE(const UInt1) +}; + +// MakeSpan for std::vector yields Span +// Examples: +// (1) create a mutable 
span +// std::vector v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span +// (2) create an immutable span +// const std::vector v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span +// (3) create an immutable span from a mutable vector +// std::vector v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span + +template class C, typename... Args> +Span MakeSpan(C& c) +{ + return Span(c.data(), c.size()); +} + +template class C, typename... Args> +Span MakeSpan(const C& c) +{ + return Span(c.data(), c.size()); +} + +template + class C, + typename... Args, + std::enable_if_t::value, bool> = true> +Span MakeSpan(const C& c) +{ + return Span(c.data(), c.size()); +} + +// MakeSpan for std::array yields Span. +// Examples: +// (1) create a mutable span +// std::array v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span +// (2) create an immutable span from a mutable vector +// std::array v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span +// (3) create an immutable span +// const std::array v = { 1, 2, 3 }; +// auto span = MakeSpan(v); // span is Span + +template +Span MakeSpan(std::array& v) +{ + return Span(v.data()); +} + +template ::type> +Span MakeSpan(const std::array& v) +{ + return Span(v.data()); +} + +template ::type> +Span MakeSpan(const std::array& v) +{ + return Span(v.data()); +} + +} // namespace Util +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SpanCast.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SpanCast.hpp new file mode 100644 index 000000000..d6337eef6 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SpanCast.hpp @@ -0,0 +1,65 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/SubByteTypes.hpp" +#include "MILBlob/Util/Span.hpp" +#include + +namespace MILBlob { +namespace Util { + +/** + reinterpret_casts the underlying pointer in Span to Span. Callers are responsible for ensuring + that SourceT can be interpreted as TargetT in a meaningful way as there are neither compile- nor run-time safety + guards in place. +*/ + +template +Span SpanCast(Span span) +{ + static_assert(!MILBlob::IsSubByteSized::value && !MILBlob::IsSubByteSized::value, + "SpanCast for sub-byte sized types is not supported"); + auto ptr = reinterpret_cast(span.Data()); + auto size = (span.Size() * sizeof(SourceT)) / sizeof(TargetT); + return Span(ptr, size); +} + +/** + Reinterpret casts the underlying Span to a sub-byte type span. numElements indicates the number of + sub-byte elements in the case where the last byte contains some padding due to round to nearest byte. +*/ + +template ::value, bool> = true> +Span CastToBitSpan(Span span, size_t numElements) +{ + static_assert(std::is_same::value || std::is_same::value, + "CastToBitSpan is only possible when casting from a uint8_t span"); + if (span.Size() != MILBlob::SizeInBytes(numElements)) { + throw std::invalid_argument( + "BitSpanCast to sub-byte type span has invalid number of elements. 
Sub-byte span with NumElements " + "requires exactly Span.Size() bytes."); + } + return Span((typename MILBlob::Util::voidType::type)(span.Data()), numElements); +} + +/** + Reinterpret casts the underlying sub-byte-sized Span to a Span +*/ +template ::value, bool> = true> +Span CastFromBitSpan(Span span) +{ + size_t numBits = span.Size() * SourceT::SizeInBits; + size_t numElements = numBits / 8; + // need 1 more byte-sized element to hold remainder, if it exists + if (numBits % 8 != 0) { + numElements++; + } + return Span((const uint8_t*)span.Data(), numElements); +} + +} // namespace Util +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SubByteConversionUtils.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SubByteConversionUtils.hpp new file mode 100644 index 000000000..1a5bb8c82 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/SubByteConversionUtils.hpp @@ -0,0 +1,41 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#include "MILBlob/Util/Span.hpp" +#include + +namespace MILBlob { + +// This header contains the utils used by coremltools to pack subbyte datatype values. 
+ +// Packs a span of int8_t containing unpacked values into a packed uint8_t vector +template +std::vector PackInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackInt8Span(Util::Span unpackedValues); + +// Packs a span of uint8_t containing unpacked values into a packed uint8_t vector +template +std::vector PackUInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues); + +template <> +std::vector PackUInt8Span(Util::Span unpackedValues); + +} // namespace MILBlob diff --git a/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Verify.hpp b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Verify.hpp new file mode 100644 index 000000000..59125422c --- /dev/null +++ b/cpp/external/katagocoreml/vendor/mlmodel/src/MILBlob/Util/Verify.hpp @@ -0,0 +1,31 @@ +// Copyright (c) 2021, Apple Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-3-clause license that can be +// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause + +#pragma once + +#define MILVerifyImpl(condition, ex_type, ...) \ + do { \ + if (!(condition)) { \ + throw ex_type(__VA_ARGS__); \ + } \ + } while (0) + +#if defined(NDEBUG) +#define MILVerifyDebugImpl(condition, ex_type, ...) +#else +#define MILVerifyDebugImpl(condition, ex_type, ...) MILVerifyImpl(condition, ex_type, __VA_ARGS__) +#endif + +// MILVerifyIsNotNull verifies a pointer is not null. Upon failure, it throws the exception +// with the provided arguments. +#define MILVerifyIsNotNull(pointer, ex_type, ...) MILVerifyImpl(pointer != nullptr, ex_type, __VA_ARGS__) + +// MILVerifyIsTrue verifies condition is true. Upon failure, it throws the exception +// with the provided arguments. 
+#define MILVerifyIsTrue(condition, ex_type, ...) MILVerifyImpl(condition, ex_type, __VA_ARGS__) + +// MILDebugVerifyIsTrue verifies condition is true in debug builds only. Upon failure, +// it throws the exception with the provided arguments. +#define MILDebugVerifyIsTrue(condition, ex_type, ...) MILVerifyDebugImpl(condition, ex_type, __VA_ARGS__) diff --git a/cpp/external/katagocoreml/vendor/modelpackage/LICENSE.txt b/cpp/external/katagocoreml/vendor/modelpackage/LICENSE.txt new file mode 100644 index 000000000..b4570ec56 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/modelpackage/LICENSE.txt @@ -0,0 +1,11 @@ +Copyright (c) 2017, Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder(s) nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.cpp b/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.cpp new file mode 100644 index 000000000..2bd2e89d9 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.cpp @@ -0,0 +1,603 @@ +// +// ModelPackage.cpp +// modelpackage +// +// Copyright © 2021 Apple Inc. All rights reserved. +// + +#include "ModelPackage.hpp" + +#include "utils/JsonMap.hpp" + +#include +#include +#include +#include +#include +#include + +#if __has_include() +#include +#elif __has_include() +#include +namespace std { + namespace filesystem = std::experimental::filesystem; +} +#else +#error "missing required header " +#endif +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +static const char *kModelPackageManifestFileName = "Manifest.json"; +static const char *kModelPackageFileFormatVersionKey = "fileFormatVersion"; + +static const int kModelPackageFileFormatMajorVersion = 1; +static const int kModelPackageFileFormatMinorVersion = 0; +static const int kModelPackageFileFormatPatchVersion = 0; + +static const char *kModelPackageItemInfoEntriesKey = "itemInfoEntries"; + +static const char *kModelPackageItemInfoPathKey = "path"; +static const char *kModelPackageItemInfoNameKey = "name"; +static const char *kModelPackageItemInfoAuthorKey = "author"; +static const char *kModelPackageItemInfoDescriptionKey = "description"; + +static const char 
*kModelPackageDataDir = "Data"; + +static const char *kModelPackageRootModelKey = "rootModelIdentifier"; + +using namespace MPL; +using namespace detail; +using namespace std::filesystem; + +class detail::ModelPackageItemInfoImpl { + +private: + + std::string m_identifier; + std::string m_path; + std::string m_name; + std::string m_author; + std::string m_description; + +public: + + ModelPackageItemInfoImpl(const std::string& identifier, const std::string& path, const std::string& name, const std::string& author, const std::string& description); + + ~ModelPackageItemInfoImpl(); + + inline const std::string& identifier() { + return m_identifier; + } + + inline const std::string& path() { + return m_path; + } + + inline const std::string& name() { + return m_name; + } + + inline const std::string& author() { + return m_author; + } + + inline const std::string& description() { + return m_description; + } +}; + +ModelPackageItemInfoImpl::ModelPackageItemInfoImpl(const std::string& identifier, const std::string& path, const std::string& name, const std::string& author, const std::string& description) +: m_identifier(identifier), + m_path(path), + m_name(name), + m_author(author), + m_description(description) +{ +} + +ModelPackageItemInfoImpl::~ModelPackageItemInfoImpl() +{ +} + +ModelPackageItemInfo::ModelPackageItemInfo(std::shared_ptr modelPackageItemInfoImpl) +: m_modelPackageItemInfoImpl(modelPackageItemInfoImpl) +{ +} + +ModelPackageItemInfo::~ModelPackageItemInfo() +{ +} + +const std::string& ModelPackageItemInfo::identifier() const +{ + return m_modelPackageItemInfoImpl->identifier(); +} + +const std::string& ModelPackageItemInfo::path() const +{ + return m_modelPackageItemInfoImpl->path(); +} + +const std::string& ModelPackageItemInfo::name() const +{ + return m_modelPackageItemInfoImpl->name(); +} + +const std::string& ModelPackageItemInfo::author() const +{ + return m_modelPackageItemInfoImpl->author(); +} + +const std::string& 
ModelPackageItemInfo::description() const +{ + return m_modelPackageItemInfoImpl->description(); +} + +class detail::ModelPackageImpl { + +private: + + std::filesystem::path m_packagePath; + std::filesystem::path m_manifestPath; + std::filesystem::path m_packageDataDirPath; + + std::unique_ptr m_manifest; + + bool m_readOnly; + + void validate(); + + std::unique_ptr getItemInfoEntries() const; + std::unique_ptr getItemInfoEntry(const std::string& identifier) const; + + void createItemInfoEntry(const std::string& identifier, const std::string& path, const std::string& name, const std::string& author, const std::string& description); + void removeItemInfoEntry(const std::string& identifier); + + std::string generateIdentifier() const; + + std::filesystem::path getItemPath(const std::string& name, const std::string& author) const; + +public: + + ModelPackageImpl(const std::filesystem::path& path, bool createIfNecessary = true, bool readOnly = false); + ~ModelPackageImpl(); + + inline const std::filesystem::path& path() const { + return m_packagePath; + } + + std::string setRootModel(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description); + std::string replaceRootModel(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description); + std::shared_ptr getRootModel() const; + + std::string addItem(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description); + std::shared_ptr findItem(const std::string& identifier) const; + std::shared_ptr findItem(const std::string& name, const std::string& author) const; + std::vector findItemsByAuthor(const std::string& author) const; + + void removeItem(const std::string& identifier); + static bool isValid(const std::filesystem::path& path); + + ModelPackageItemInfo createFile(const std::string& name, const std::string& author, const std::string& description); 
+}; + +#pragma mark ModelPackageImpl + +ModelPackageImpl::ModelPackageImpl(const std::filesystem::path& path, bool createIfNecessary, bool readOnly) +: m_packagePath(path), + m_manifestPath(path / kModelPackageManifestFileName), + m_packageDataDirPath(path / kModelPackageDataDir), + m_manifest(nullptr), + m_readOnly(readOnly) +{ + if (std::filesystem::exists(m_packagePath)) { + if (std::filesystem::exists(m_manifestPath)) { + std::ifstream manifestStream(m_manifestPath, std::ios::binary); + m_manifest = std::make_unique(manifestStream); + manifestStream.close(); + } else { + throw std::runtime_error("A valid manifest does not exist at path: " + m_manifestPath.string() + ". Remove the .mlpackage directory and try again."); + } + } + // Create the package structure at specified path + else if (createIfNecessary) { + if (false == create_directory(m_packagePath)) { + throw std::runtime_error("Failed to create model package at path: " + m_packagePath.string()); + } + + if (false == create_directory(m_packageDataDirPath)) { + throw std::runtime_error("Failed to create data directory at path: " + m_packageDataDirPath.string()); + } + + m_manifest = std::make_unique(); + std::stringstream ss; + ss << kModelPackageFileFormatMajorVersion << "." << kModelPackageFileFormatMinorVersion << "." 
<< kModelPackageFileFormatPatchVersion; + m_manifest->setString(kModelPackageFileFormatVersionKey, ss.str()); + } + // Error out since package does not exist + else { + throw std::runtime_error("Failed to open model package at path: " + m_packagePath.string()); + } + + validate(); +} + +ModelPackageImpl::~ModelPackageImpl() +{ + if (m_readOnly) { + return; + } + + std::filesystem::path uniquedDestination(m_manifestPath); + std::filesystem::path suffix(generateIdentifier()); // std::filesystem::path from stringified UUID + uniquedDestination.replace_extension(suffix); // unique filename in the presumed writable directory where Manifest.json is sited + + std::ofstream uniquedStream(uniquedDestination, std::ios::binary); + m_manifest->serialize(uniquedStream); + uniquedStream.close(); + if (uniquedStream.fail()) { // If any of the above fail do not go on to move uniquedDestination to m_manifestPath. + return; + } + + std::error_code ecode; + std::filesystem::rename(uniquedDestination, m_manifestPath, ecode); // On failure sets ecode and makes no changes. Does not throw. 
+ if (ecode.value()) { + std::filesystem::remove(uniquedDestination); + } +} + +void ModelPackageImpl::validate() +{ + const std::string versionString = m_manifest->getString(kModelPackageFileFormatVersionKey); + + std::istringstream versionStringStream(versionString); + std::vector versionTokens; + for (std::string token; std::getline(versionStringStream, token, '.');) { + versionTokens.push_back(token); + } + + if (versionTokens.size() != 3) { + throw std::runtime_error("File format version must be in the form of major.minor.patch, but the specified value was: " + versionString); + } + + int majorVersion = 0; + int minorVersion = 0; + int patchVersion = 0; + try { + majorVersion = std::stoi(versionTokens[0]); + minorVersion = std::stoi(versionTokens[1]); + patchVersion = std::stoi(versionTokens[2]); + } catch (std::invalid_argument& e) { + throw std::runtime_error("Failed to parse file format version: " + versionString + " because: " + e.what()); + } + + if (majorVersion < 0 || + minorVersion < 0 || + patchVersion < 0 ) { + throw std::runtime_error("File format version uses negative number(s): " + versionString); + } + + if ((majorVersion > kModelPackageFileFormatMajorVersion) || + (majorVersion == kModelPackageFileFormatMajorVersion && minorVersion > kModelPackageFileFormatMinorVersion) || + (minorVersion == kModelPackageFileFormatMinorVersion && patchVersion > kModelPackageFileFormatPatchVersion)) { + throw std::runtime_error("Unsupported version: " + versionString); + } + + // Validate 1.0.0 model package + + auto itemInfoEntries = getItemInfoEntries(); + if (itemInfoEntries != nullptr) { + std::vector identifiers; + itemInfoEntries->getKeys(identifiers); + for (const auto& identifier : identifiers) { + auto itemInfoEntry = getItemInfoEntry(identifier); + + if (false == itemInfoEntry->hasKey(kModelPackageItemInfoPathKey) || + false == itemInfoEntry->hasKey(kModelPackageItemInfoNameKey) || + false == itemInfoEntry->hasKey(kModelPackageItemInfoAuthorKey) || + 
false == itemInfoEntry->hasKey(kModelPackageItemInfoDescriptionKey)) { + throw std::runtime_error("Invalid itemInfo for identifier: " + identifier); + } + + auto path = m_packageDataDirPath / itemInfoEntry->getString(kModelPackageItemInfoPathKey); + if (false == exists(path)) { + throw std::runtime_error("Item does not exist for identifier: " + identifier); + } + } + } +} + +std::unique_ptr ModelPackageImpl::getItemInfoEntries() const +{ + if (m_manifest->hasKey(kModelPackageItemInfoEntriesKey)) { + return m_manifest->getObject(kModelPackageItemInfoEntriesKey); + } + + return std::make_unique(); +} + +std::unique_ptr ModelPackageImpl::getItemInfoEntry(const std::string& identifier) const +{ + auto itemInfoEntries = getItemInfoEntries(); + + if (itemInfoEntries->hasKey(identifier)) { + return itemInfoEntries->getObject(identifier); + } + + return nullptr; +} + +void ModelPackageImpl::removeItemInfoEntry(const std::string& identifier) +{ + auto itemInfoEntries = getItemInfoEntries(); + + std::vector identifiers; + itemInfoEntries->getKeys(identifiers); + + auto newItemInfoEntries = std::make_unique(); + for (const auto& localIdentifier : identifiers) { + if (localIdentifier != identifier) { + newItemInfoEntries->setObject(localIdentifier, itemInfoEntries->getObject(localIdentifier)); + } + } + + m_manifest->setObject(kModelPackageItemInfoEntriesKey, std::move(newItemInfoEntries)); +} + +void ModelPackageImpl::createItemInfoEntry(const std::string& identifier, const std::string& path, const std::string& name, const std::string& author, const std::string& description) { + auto itemInfoEntry = getItemInfoEntry(identifier); + + if (nullptr == itemInfoEntry) { + itemInfoEntry = std::make_unique(); + } + + itemInfoEntry->setString(kModelPackageItemInfoPathKey, path); + itemInfoEntry->setString(kModelPackageItemInfoNameKey, name); + itemInfoEntry->setString(kModelPackageItemInfoAuthorKey, author); + itemInfoEntry->setString(kModelPackageItemInfoDescriptionKey, description); 
+ + auto itemInfoEntries = getItemInfoEntries(); + itemInfoEntries->setObject(identifier, std::move(itemInfoEntry)); + m_manifest->setObject(kModelPackageItemInfoEntriesKey, std::move(itemInfoEntries)); +} + +std::filesystem::path ModelPackageImpl::getItemPath(const std::string& name, const std::string& author) const { + return std::filesystem::path(author) / name; +} + +std::string ModelPackageImpl::generateIdentifier() const { + uuid_t uuid; + + // uuid_unparse generates a 36-character null-terminated string (37 bytes). + // they provide no mechanisms for us to deduce this length, therefore + // we have to hardcode it here. + char buf[37] = ""; + + uuid_generate(uuid); + uuid_unparse(uuid, buf); + + return std::string(buf); +} + +ModelPackageItemInfo ModelPackageImpl::createFile(const std::string& name, const std::string& author, const std::string& description) { + + if (findItem(name, author) != nullptr) { + throw std::runtime_error("The package already contains a file with name: " + name + " author: " + author); + } + + auto filePath = getItemPath(name, author); + auto dstPath = m_packageDataDirPath / filePath; + + create_directories(dstPath.parent_path()); + + std::ofstream stream(dstPath, std::ios::binary); + if (!stream.is_open()) { + throw std::runtime_error("Failed to create file at path: " + dstPath.string()); + } + + auto identifier = generateIdentifier(); + createItemInfoEntry(identifier, filePath.string(), name, author, description); + return *(findItem(identifier)); +} + +std::string ModelPackageImpl::addItem(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description) +{ + if (findItem(name, author) != nullptr) { + throw std::runtime_error("The package already contains a file with name: " + name + " author: " + author); + } + + auto filePath = getItemPath(name, author); + auto dstPath = m_packageDataDirPath / filePath; + + create_directories(dstPath.parent_path()); + 
std::filesystem::copy(path, dstPath); + + auto identifier = generateIdentifier(); + createItemInfoEntry(identifier, filePath.string(), name, author, description); + return identifier; +} + +std::string ModelPackageImpl::setRootModel(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description) +{ + if (m_manifest->hasKey(kModelPackageRootModelKey)) { + throw std::runtime_error("A root model already exists in this package. Remove the existing root model or the .mlpackage directory and try again."); + } + + auto identifier = addItem(path, name, author, description); + m_manifest->setString(kModelPackageRootModelKey, identifier); + return identifier; +} + +std::string ModelPackageImpl::replaceRootModel(const std::filesystem::path& path, const std::string& name, const std::string& author, const std::string& description) +{ + if (m_manifest->hasKey(kModelPackageRootModelKey)) { + auto rootModelIdentifier = m_manifest->getString(kModelPackageRootModelKey); + removeItem(rootModelIdentifier); + } + + auto identifier = addItem(path, name, author, description); + m_manifest->setString(kModelPackageRootModelKey, identifier); + return identifier; +} + +std::shared_ptr ModelPackageImpl::getRootModel() const +{ + if (false == m_manifest->hasKey(kModelPackageRootModelKey)) { + throw std::runtime_error("Failed to look up root model"); + } + + auto rootModelIdentifier = m_manifest->getString(kModelPackageRootModelKey); + return findItem(rootModelIdentifier); +} + +std::shared_ptr ModelPackageImpl::findItem(const std::string& identifier) const +{ + auto itemInfoEntry = getItemInfoEntry(identifier); + if (itemInfoEntry == nullptr) { + return nullptr; + } + + auto path = m_packageDataDirPath / itemInfoEntry->getString(kModelPackageItemInfoPathKey); + auto name = itemInfoEntry->getString(kModelPackageItemInfoNameKey); + auto author = itemInfoEntry->getString(kModelPackageItemInfoAuthorKey); + auto description = 
itemInfoEntry->getString(kModelPackageItemInfoDescriptionKey); + + return std::make_shared(std::make_shared(identifier, path, name, author, description)); +} + +std::shared_ptr ModelPackageImpl::findItem(const std::string& name, const std::string& author) const +{ + auto itemInfoEntries = getItemInfoEntries(); + if (itemInfoEntries != nullptr) { + std::vector identifiers; + itemInfoEntries->getKeys(identifiers); + for (const auto& identifier : identifiers) { + auto itemInfo = findItem(identifier); + if (itemInfo->author() == author && itemInfo->name() == name) { + return itemInfo; + } + } + } + + return nullptr; +} + +std::vector ModelPackageImpl::findItemsByAuthor(const std::string& author) const +{ + auto itemInfoVector = std::vector(); + auto itemInfoEntries = getItemInfoEntries(); + if (itemInfoEntries != nullptr) { + std::vector identifiers; + itemInfoEntries->getKeys(identifiers); + for (const auto& identifier : identifiers) { + auto itemInfo = findItem(identifier); + if (itemInfo->author() == author) { + itemInfoVector.push_back(*itemInfo); + } + } + } + + return itemInfoVector; +} + +void ModelPackageImpl::removeItem(const std::string& identifier) +{ + auto itemInfoEntry = getItemInfoEntry(identifier); + if (itemInfoEntry == nullptr) { + throw std::runtime_error("Failed to look up file with identifier: " + identifier); + } + + auto path = m_packageDataDirPath / itemInfoEntry->getString(kModelPackageItemInfoPathKey); + if (0 != std::remove(path.c_str())) { + throw std::runtime_error("Failed to remove file at path: " + path.string()); + } + + removeItemInfoEntry(identifier); +} + +bool ModelPackageImpl::isValid(const std::filesystem::path& path) +{ + try { + ModelPackageImpl(path, false, true); + } catch (std::runtime_error& e) { + return false; + } + return true; +} + +#pragma mark ModelPackage + +ModelPackage::ModelPackage(const std::string& packagePath, bool createIfNecessary, bool readOnly) +: m_modelPackageImpl(std::make_shared(packagePath, 
createIfNecessary, readOnly)) +{ +} + +ModelPackage::~ModelPackage() +{ +} + +std::string ModelPackage::path() const +{ + return m_modelPackageImpl->path(); +} + +std::string ModelPackage::setRootModel(const std::string& path, const std::string& name, const std::string& author, const std::string& description) +{ + return m_modelPackageImpl->setRootModel(path, name, author, description); +} + +std::string ModelPackage::replaceRootModel(const std::string& path, const std::string& name, const std::string& author, const std::string& description) +{ + return m_modelPackageImpl->replaceRootModel(path, name, author, description); +} + +std::shared_ptr ModelPackage::getRootModel() const +{ + return m_modelPackageImpl->getRootModel(); +} + +std::string ModelPackage::addItem(const std::string& path, const std::string& name, const std::string& author, const std::string& description) +{ + return m_modelPackageImpl->addItem(path, name, author, description); +} + +std::shared_ptr ModelPackage::findItem(const std::string& identifier) const +{ + return m_modelPackageImpl->findItem(identifier); +} + +std::shared_ptr ModelPackage::findItem(const std::string& name, const std::string& author) const +{ + return m_modelPackageImpl->findItem(name, author); +} + +std::vector ModelPackage::findItemsByAuthor(const std::string& author) const +{ + return m_modelPackageImpl->findItemsByAuthor(author); +} + +void ModelPackage::removeItem(const std::string& identifier) +{ + return m_modelPackageImpl->removeItem(identifier); +} + +bool ModelPackage::isValid(const std::string& path) +{ + return ModelPackageImpl::isValid(path); +} + +ModelPackageItemInfo ModelPackage::createFile(const std::string& name, const std::string& author, const std::string& description) +{ + return m_modelPackageImpl->createFile(name, author, description); +} + +#if defined(__cplusplus) +} // extern "C" +#endif + diff --git a/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.hpp 
b/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.hpp new file mode 100644 index 000000000..2e44a0fb9 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/modelpackage/src/ModelPackage.hpp @@ -0,0 +1,160 @@ +// +// ModelPackage.hpp +// modelpackage +// +// Copyright © 2021 Apple Inc. All rights reserved. +// + +#ifndef ModelPackage_hpp +#define ModelPackage_hpp + +#include +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +/** MPL = Model Package Library. */ +namespace MPL { + +namespace detail { + +class ModelPackageItemInfoImpl; +class ModelPackageImpl; + +} // namespace detail + +class ModelPackageItemInfo { + +private: + + std::shared_ptr m_modelPackageItemInfoImpl; + +public: + + /** Creates an instance of file info to hold information about a file that exists in a model package. */ + ModelPackageItemInfo(std::shared_ptr modelPackageItemInfoImpl); + ~ModelPackageItemInfo(); + + /** Unique file identifier of the file in the model package. */ + const std::string& identifier() const; + + /** Path of the file inside the model package. */ + const std::string& path() const; + + /** Name specified while storing the file in the model package. */ + const std::string& name() const; + + /** Author specified while storing the file in the model package. */ + const std::string& author() const; + + /** Description specified while storing the file in the model package. Defaults to "". */ + const std::string& description() const; +}; + + +class ModelPackage { + +private: + + std::shared_ptr m_modelPackageImpl; + +public: + + /** Creates an instance of model package that exists at the specified path. + @param path Path of the model package (with extension .mlpackage). + @param createIfNecessary Create a new model package if one does not exist at the specified path. Defaults to true. + @param readOnly The model package will not be mutated. Defaults to false. + @throw Runtime exception if an invalid model package exists at the specified path. 
*/ + explicit ModelPackage(const std::string& path, bool createIfNecessary = true, bool readOnly = false); + + ~ModelPackage(); + + /** Returns the path of the model package. */ + std::string path() const; + + /** + Set a root model in model package. Each model package has a unique root model, which can be retrieved without needing an identifier. + @param path Path of the model file. + @param name Name of the model file. + @param author Author of the model file. Reverse DNS identifier of the author application is recommended. Example: com.apple.coremltools. + @param description Optional description to describe the model file. + @return Unique file identifier that can be used to retrieve the model file. + @throw a runtime exception if the model package already contains a root model. */ + std::string setRootModel(const std::string& path, const std::string& name, const std::string& author, const std::string& description = ""); + + /** + Replace a root model in the model package. The model package may or may not already contain a root model. Each model package has a unique root model, which can be retrieved without needing an identifier. + @param path Path of the model file. + @param name Name of the model file. + @param author Author of the model file. Reverse DNS identifier of the author application is recommended. Example: com.apple.coremltools. + @param description Optional description to describe the model file. + @return Unique file identifier that can be used to retrieve the model file. */ + std::string replaceRootModel(const std::string& path, const std::string& name, const std::string& author, const std::string& description = ""); + + /** + Retrieve previously set root model from the model package. + @return ModelPackageItemInfo with information about the retrieved root model file. + @throw Runtime exception if the model package does not contain a root model. 
*/ + std::shared_ptr getRootModel() const; + + /** + Add a file or directory in the model package using name and author as a uniqueing key. + @param path Path of the file. + @param name Name of the file. + @param author Author of the file. Reverse DNS identifier of the author application is recommended. Example: com.apple.coremltools. + @param description Optional description to describe the file. + @return Unique file identifier that can be used to look up the file. + @throw a runtime exception if the model package already contains a file with provided name and author. */ + std::string addItem(const std::string& path, const std::string& name, const std::string& author, const std::string& description = ""); + + /** + Retrieve previously added file or directory from the model package by providing an identifier. + @param identifier Unique identifier of a previous added file + @return A pointer to ModelPackageItemInfo with information about the retrieved file or directory. nullptr if a file or directory with given identifier does not exist. */ + std::shared_ptr findItem(const std::string& identifier) const; + + /** + Retrieve previously added file or directory from the model package by providing name and author. + @param name Name of a previous added file + @param author Author of a previous added file + @return A pointer to ModelPackageItemInfo with information about the retrieved file or directory by providing name and author. nullptr if a file or directory with given name and author does not exist. */ + std::shared_ptr findItem(const std::string& name, const std::string& author) const; + + /** + Retrieve previously added files or directories from the model package by providing an author. + @param author Name of the author. + @return Vector of ModelPackageItemInfo objects with information about the retrieved files by providing the author. 
*/ + std::vector findItemsByAuthor(const std::string& author) const; + + /** + Remove previously added file or directory from the model package by providing an identifier. + @param identifier Unique file identifier corresponding to a file that was added previously. + @throw Runtime exception if the model package does not contain file with provided identifier. */ + void removeItem(const std::string& identifier); + + /** + Tells if the input path corresponds to a valid model package. + @param path Path of model package. + @return True if the path corresponds to a valid model package. False, otherwise. */ + static bool isValid(const std::string& path); + + /** + Creates an empty file in the model package and returns corresponding file identifier. + @param name Name of the file. + @param author Author of the file. Reverse DNS identifier of the author application is recommended. Example: com.apple.coremltools. + @param description Optional description to describe the file. + @return ModelPackageItemInfo with information about the created file. */ + ModelPackageItemInfo createFile(const std::string& name, const std::string& author, const std::string& description); +}; + +} // namespace MPL + +#if defined(__cplusplus) +} // extern "C" +#endif + +#endif /* ModelPackage_hpp */ + diff --git a/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.cpp b/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.cpp new file mode 100644 index 000000000..400dcf222 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.cpp @@ -0,0 +1,171 @@ +// JsonMap.cpp +// modelpackage +// +// Copyright © 2021 Apple. All rights reserved. 
+ +#include +#include +#include +#include +#include +#include + +#include "JsonMap.hpp" +#include "json.hpp" + +using namespace nlohmann; + +class JsonMapImpl { + +public: + + nlohmann::json m_jsonObject; + + JsonMapImpl(); + JsonMapImpl(std::istream& stream); + JsonMapImpl(nlohmann::json j_init); + + JsonMapImpl(const JsonMapImpl&) = delete; + JsonMapImpl(JsonMapImpl&&) = delete; + JsonMapImpl& operator=(const JsonMapImpl&) = delete; + JsonMapImpl& operator=(JsonMapImpl&&) = delete; + + /* ==== Key operations ==== */ + + bool hasKey(const std::string& key) const; + void getKeys(std::vector& keys); + + /* ==== Getter methods ==== */ + + std::string getString(const std::string& key) const; + std::unique_ptr getObject(const std::string& key) const; + + /* ==== Setter methods ==== */ + + void setString(const std::string& key, const std::string& value); + void setObject(const std::string& key, std::unique_ptr value); + + void serialize(std::ostream& stream); + void deserialize(std::istream& stream); +}; + +JsonMapImpl::JsonMapImpl() { + m_jsonObject = nlohmann::json({}); +} + +JsonMapImpl::JsonMapImpl(std::istream& stream) { + deserialize(stream); +} + +JsonMapImpl::JsonMapImpl(nlohmann::json j_init) +: m_jsonObject(j_init) { +} + +/* ==== Key operations ==== */ + +bool JsonMapImpl::hasKey(const std::string& key) const { + return m_jsonObject.count(key) > 0; +} + +void JsonMapImpl::getKeys(std::vector& keys) { + for(json::iterator it = m_jsonObject.begin(); it != m_jsonObject.end(); ++it) { + keys.push_back(it.key()); + } +} + +/* ==== Getter methods ==== */ + +std::string JsonMapImpl::getString(const std::string& key) const { + return m_jsonObject.at(key).get(); +} + +std::unique_ptr JsonMapImpl::getObject(const std::string& key) const { + auto childCopy = m_jsonObject.at(key); + return std::make_unique(childCopy); +} + +/* ==== Setter methods ==== */ + +void JsonMapImpl::setString(const std::string& key, const std::string& value) { + m_jsonObject[key] = value; +} + 
+void JsonMapImpl::setObject(const std::string& key, std::unique_ptr value) { + m_jsonObject[key] = value->m_jsonObject; +} + +void JsonMapImpl::deserialize(std::istream& stream) { + if(!stream.good()) { + throw std::runtime_error("Input stream is not valid"); + } + + try { + stream >> m_jsonObject; + } catch (std::exception& e) { + // nlohmann::json raises std::exception on parser errors, but the client of JsonMap only + // handles std::runtime_error because they don't want to "handle" programming errors + // (std::logic_error). + // + // As such, we translate the exception type here. + throw std::runtime_error(e.what()); + } +} + +void JsonMapImpl::serialize(std::ostream& stream) { + // write prettified JSON to another file + stream << std::setw(4) << m_jsonObject << std::endl; + +} + +/* ==== JsonMap ==== */ + +JsonMap::JsonMap() +: m_jsonMapImpl(std::make_unique()) +{ +} + +JsonMap::JsonMap(std::istream& stream) +: m_jsonMapImpl(std::make_unique(stream)) +{ +} + +JsonMap::JsonMap(std::unique_ptr jsonMapImpl) +: m_jsonMapImpl(std::move(jsonMapImpl)) +{ +} + +JsonMap::~JsonMap() = default; + +/* ==== Key operations ==== */ + +bool JsonMap::hasKey(const std::string& key) const { + return m_jsonMapImpl->hasKey(key); +} + +void JsonMap::getKeys(std::vector& keys) { + return m_jsonMapImpl->getKeys(keys); +} + +/* ==== Getter methods ==== */ + +std::string JsonMap::getString(const std::string& key) const { + return m_jsonMapImpl->getString(key); +} + +std::unique_ptr JsonMap::getObject(const std::string& key) const { + return std::make_unique(m_jsonMapImpl->getObject(key)); +} + +/* ==== Setter methods ==== */ + +void JsonMap::setString(const std::string& key, const std::string& value) { + return m_jsonMapImpl->setString(key, value); +} + +void JsonMap::setObject(const std::string& key, std::unique_ptr value) { + m_jsonMapImpl->setObject(key, std::move(value->m_jsonMapImpl)); +} + +void JsonMap::serialize(std::ostream& stream) { + return 
m_jsonMapImpl->serialize(stream); +} diff --git a/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.hpp b/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.hpp new file mode 100644 index 000000000..962d25b61 --- /dev/null +++ b/cpp/external/katagocoreml/vendor/modelpackage/src/utils/JsonMap.hpp @@ -0,0 +1,52 @@ +// +// JsonMap.hpp +// modelpackage +// +// Copyright © 2021 Apple. All rights reserved. +// + +#pragma once + +#include +#include +#include +#include + +class JsonMapImpl; + +class JsonMap { + +private: + + std::unique_ptr m_jsonMapImpl; + +public: + + JsonMap(); + JsonMap(std::istream& stream); + JsonMap(std::unique_ptr jsonMapImpl); + + ~JsonMap(); + + JsonMap(const JsonMap&) = delete; + JsonMap(JsonMap&&) = delete; + JsonMap& operator=(const JsonMap&) = delete; + JsonMap& operator=(JsonMap&&) = delete; + + /* ==== Key operations ==== */ + + bool hasKey(const std::string& key) const; + void getKeys(std::vector& keys); + + /* ==== Getter methods ==== */ + + std::string getString(const std::string& key) const; + std::unique_ptr getObject(const std::string& key) const; + + /* ==== Setter methods ==== */ + + void setString(const std::string& key, const std::string& value); + void setObject(const std::string& key, std::unique_ptr value); + + void serialize(std::ostream& stream); +}; From 32e7209168de32f5cd6839f844fb55ad27bb63e9 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sun, 22 Mar 2026 20:59:59 +0800 Subject: [PATCH 33/34] Deduplicate nlohmann/json by reusing existing copy in cpp/external/ The vendored katagocoreml library included its own copy of nlohmann/json (v3.9.1, ~26K lines) while KataGo already has one at cpp/external/nlohmann_json/. Point katagocoreml's CMake include path to the shared copy and remove the duplicate. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- cpp/external/katagocoreml/CMakeLists.txt | 2 +- .../vendor/deps/nlohmann/CODE_OF_CONDUCT.md | 46 - .../vendor/deps/nlohmann/LICENSE.MIT | 21 - .../vendor/deps/nlohmann/README.md | 1643 - .../vendor/deps/nlohmann/json.hpp | 25855 ---------------- 5 files changed, 1 insertion(+), 27566 deletions(-) delete mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md delete mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT delete mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/README.md delete mode 100644 cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp diff --git a/cpp/external/katagocoreml/CMakeLists.txt b/cpp/external/katagocoreml/CMakeLists.txt index e0ef5a0c2..38a543b9e 100644 --- a/cpp/external/katagocoreml/CMakeLists.txt +++ b/cpp/external/katagocoreml/CMakeLists.txt @@ -121,7 +121,7 @@ target_include_directories(katagocoreml ${PROTO_GENERATED_DIR} ${MILBLOB_DIR}/.. ${MODELPACKAGE_DIR} - ${COREMLTOOLS_ROOT}/deps/nlohmann + ${CMAKE_CURRENT_SOURCE_DIR}/../nlohmann_json ${COREMLTOOLS_ROOT}/deps/FP16/include ${Protobuf_INCLUDE_DIRS} ${KATAGOCOREML_ABSEIL_INCLUDE_DIRS} diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md b/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md deleted file mode 100644 index 770b8173e..000000000 --- a/cpp/external/katagocoreml/vendor/deps/nlohmann/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at mail@nlohmann.me. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT b/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT deleted file mode 100644 index f0622d6dc..000000000 --- a/cpp/external/katagocoreml/vendor/deps/nlohmann/LICENSE.MIT +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2013-2021 Niels Lohmann - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md b/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md deleted file mode 100644 index 5d354f1ed..000000000 --- a/cpp/external/katagocoreml/vendor/deps/nlohmann/README.md +++ /dev/null @@ -1,1643 +0,0 @@ -[![JSON for Modern C++](https://raw.githubusercontent.com/nlohmann/json/master/doc/json.gif)](https://github.com/nlohmann/json/releases) - -[![Build Status](https://travis-ci.org/nlohmann/json.svg?branch=master)](https://travis-ci.org/nlohmann/json) -[![Build Status](https://ci.appveyor.com/api/projects/status/1acb366xfyg3qybk/branch/develop?svg=true)](https://ci.appveyor.com/project/nlohmann/json) -[![Ubuntu](https://github.com/nlohmann/json/workflows/Ubuntu/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AUbuntu) -[![macOS](https://github.com/nlohmann/json/workflows/macOS/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AmacOS) -[![Windows](https://github.com/nlohmann/json/workflows/Windows/badge.svg)](https://github.com/nlohmann/json/actions?query=workflow%3AWindows) -[![Build Status](https://circleci.com/gh/nlohmann/json.svg?style=svg)](https://circleci.com/gh/nlohmann/json) -[![Coverage Status](https://coveralls.io/repos/github/nlohmann/json/badge.svg?branch=develop)](https://coveralls.io/github/nlohmann/json?branch=develop) -[![Coverity Scan Build Status](https://scan.coverity.com/projects/5550/badge.svg)](https://scan.coverity.com/projects/nlohmann-json) -[![Codacy 
Badge](https://api.codacy.com/project/badge/Grade/f3732b3327e34358a0e9d1fe9f661f08)](https://www.codacy.com/app/nlohmann/json?utm_source=github.com&utm_medium=referral&utm_content=nlohmann/json&utm_campaign=Badge_Grade) -[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/nlohmann/json.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/nlohmann/json/context:cpp) -[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/json.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:json) -[![Try online](https://img.shields.io/badge/try-online-blue.svg)](https://wandbox.org/permlink/3lCHrFUZANONKv7a) -[![Documentation](https://img.shields.io/badge/docs-doxygen-blue.svg)](https://nlohmann.github.io/json/doxygen/index.html) -[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/nlohmann/json/master/LICENSE.MIT) -[![GitHub Releases](https://img.shields.io/github/release/nlohmann/json.svg)](https://github.com/nlohmann/json/releases) -[![GitHub Downloads](https://img.shields.io/github/downloads/nlohmann/json/total)](https://github.com/nlohmann/json/releases) -[![GitHub Issues](https://img.shields.io/github/issues/nlohmann/json.svg)](https://github.com/nlohmann/json/issues) -[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/nlohmann/json.svg)](https://isitmaintained.com/project/nlohmann/json "Average time to resolve an issue") -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/289/badge)](https://bestpractices.coreinfrastructure.org/projects/289) -[![GitHub Sponsors](https://img.shields.io/badge/GitHub-Sponsors-ff69b4)](https://github.com/sponsors/nlohmann) - -- [Design goals](#design-goals) -- [Sponsors](#sponsors) -- [Integration](#integration) - - [CMake](#cmake) - - [Package Managers](#package-managers) - - [Pkg-config](#pkg-config) -- [Examples](#examples) - - [JSON as first-class data 
type](#json-as-first-class-data-type) - - [Serialization / Deserialization](#serialization--deserialization) - - [STL-like access](#stl-like-access) - - [Conversion from STL containers](#conversion-from-stl-containers) - - [JSON Pointer and JSON Patch](#json-pointer-and-json-patch) - - [JSON Merge Patch](#json-merge-patch) - - [Implicit conversions](#implicit-conversions) - - [Conversions to/from arbitrary types](#arbitrary-types-conversions) - - [Specializing enum conversion](#specializing-enum-conversion) - - [Binary formats (BSON, CBOR, MessagePack, and UBJSON)](#binary-formats-bson-cbor-messagepack-and-ubjson) -- [Supported compilers](#supported-compilers) -- [License](#license) -- [Contact](#contact) -- [Thanks](#thanks) -- [Used third-party tools](#used-third-party-tools) -- [Projects using JSON for Modern C++](#projects-using-json-for-modern-c) -- [Notes](#notes) -- [Execute unit tests](#execute-unit-tests) - -## Design goals - -There are myriads of [JSON](https://json.org) libraries out there, and each may even have its reason to exist. Our class had these design goals: - -- **Intuitive syntax**. In languages such as Python, JSON feels like a first class data type. We used all the operator magic of modern C++ to achieve the same feeling in your code. Check out the [examples below](#examples) and you'll know what I mean. - -- **Trivial integration**. Our whole code consists of a single header file [`json.hpp`](https://github.com/nlohmann/json/blob/develop/single_include/nlohmann/json.hpp). That's it. No library, no subproject, no dependencies, no complex build system. The class is written in vanilla C++11. All in all, everything should require no adjustment of your compiler flags or project settings. - -- **Serious testing**. Our class is heavily [unit-tested](https://github.com/nlohmann/json/tree/develop/test/src) and covers [100%](https://coveralls.io/r/nlohmann/json) of the code, including all exceptional behavior. 
Furthermore, we checked with [Valgrind](https://valgrind.org) and the [Clang Sanitizers](https://clang.llvm.org/docs/index.html) that there are no memory leaks. [Google OSS-Fuzz](https://github.com/google/oss-fuzz/tree/master/projects/json) additionally runs fuzz tests against all parsers 24/7, effectively executing billions of tests so far. To maintain high quality, the project is following the [Core Infrastructure Initiative (CII) best practices](https://bestpractices.coreinfrastructure.org/projects/289). - -Other aspects were not so important to us: - -- **Memory efficiency**. Each JSON object has an overhead of one pointer (the maximal size of a union) and one enumeration element (1 byte). The default generalization uses the following C++ data types: `std::string` for strings, `int64_t`, `uint64_t` or `double` for numbers, `std::map` for objects, `std::vector` for arrays, and `bool` for Booleans. However, you can template the generalized class `basic_json` to your needs. - -- **Speed**. There are certainly [faster JSON libraries](https://github.com/miloyip/nativejson-benchmark#parsing-time) out there. However, if your goal is to speed up your development by adding JSON support with a single header, then this library is the way to go. If you know how to use a `std::vector` or `std::map`, you are already set. - -See the [contribution guidelines](https://github.com/nlohmann/json/blob/master/.github/CONTRIBUTING.md#please-dont) for more information. - - -## Sponsors - -You can sponsor this library at [GitHub Sponsors](https://github.com/sponsors/nlohmann). - -### :label: Named Sponsors - -- [Michael Hartmann](https://github.com/reFX-Mike) -- [Stefan Hagen](https://github.com/sthagen) -- [Steve Sperandeo](https://github.com/homer6) -- [Robert Jefe Lindstädt](https://github.com/eljefedelrodeodeljefe) -- [Steve Wagner](https://github.com/ciroque) - -Thanks everyone! 
- - -## Integration - -[`json.hpp`](https://github.com/nlohmann/json/blob/develop/single_include/nlohmann/json.hpp) is the single required file in `single_include/nlohmann` or [released here](https://github.com/nlohmann/json/releases). You need to add - -```cpp -#include - -// for convenience -using json = nlohmann::json; -``` - -to the files you want to process JSON and set the necessary switches to enable C++11 (e.g., `-std=c++11` for GCC and Clang). - -You can further use file [`include/nlohmann/json_fwd.hpp`](https://github.com/nlohmann/json/blob/develop/include/nlohmann/json_fwd.hpp) for forward-declarations. The installation of json_fwd.hpp (as part of cmake's install step), can be achieved by setting `-DJSON_MultipleHeaders=ON`. - -### CMake - -You can also use the `nlohmann_json::nlohmann_json` interface target in CMake. This target populates the appropriate usage requirements for `INTERFACE_INCLUDE_DIRECTORIES` to point to the appropriate include directories and `INTERFACE_COMPILE_FEATURES` for the necessary C++11 flags. - -#### External - -To use this library from a CMake project, you can locate it directly with `find_package()` and use the namespaced imported target from the generated package configuration: - -```cmake -# CMakeLists.txt -find_package(nlohmann_json 3.2.0 REQUIRED) -... -add_library(foo ...) -... -target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) -``` - -The package configuration file, `nlohmann_jsonConfig.cmake`, can be used either from an install tree or directly out of the build tree. - -#### Embedded - -To embed the library directly into an existing CMake project, place the entire source tree in a subdirectory and call `add_subdirectory()` in your `CMakeLists.txt` file: - -```cmake -# Typically you don't care so much for a third party library's tests to be -# run from your own project's code. 
-set(JSON_BuildTests OFF CACHE INTERNAL "") - -# If you only include this third party in PRIVATE source files, you do not -# need to install it when your main project gets installed. -# set(JSON_Install OFF CACHE INTERNAL "") - -# Don't use include(nlohmann_json/CMakeLists.txt) since that carries with it -# unintended consequences that will break the build. It's generally -# discouraged (although not necessarily well documented as such) to use -# include(...) for pulling in other CMake projects anyways. -add_subdirectory(nlohmann_json) -... -add_library(foo ...) -... -target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) -``` - -##### Embedded (FetchContent) - -Since CMake v3.11, -[FetchContent](https://cmake.org/cmake/help/v3.11/module/FetchContent.html) can -be used to automatically download the repository as a dependency at configure time. - -Example: -```cmake -include(FetchContent) - -FetchContent_Declare(json - GIT_REPOSITORY https://github.com/nlohmann/json.git - GIT_TAG v3.7.3) - -FetchContent_GetProperties(json) -if(NOT json_POPULATED) - FetchContent_Populate(json) - add_subdirectory(${json_SOURCE_DIR} ${json_BINARY_DIR} EXCLUDE_FROM_ALL) -endif() - -target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) -``` - -**Note**: The repository https://github.com/nlohmann/json download size is huge. -It contains all the dataset used for the benchmarks. You might want to depend on -a smaller repository. For instance, you might want to replace the URL above by -https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent - -#### Supporting Both - -To allow your project to support either an externally supplied or an embedded JSON library, you can use a pattern akin to the following: - -``` cmake -# Top level CMakeLists.txt -project(FOO) -... -option(FOO_USE_EXTERNAL_JSON "Use an external JSON library" OFF) -... -add_subdirectory(thirdparty) -... -add_library(foo ...) -... 
-# Note that the namespaced target will always be available regardless of the -# import method -target_link_libraries(foo PRIVATE nlohmann_json::nlohmann_json) -``` -```cmake -# thirdparty/CMakeLists.txt -... -if(FOO_USE_EXTERNAL_JSON) - find_package(nlohmann_json 3.2.0 REQUIRED) -else() - set(JSON_BuildTests OFF CACHE INTERNAL "") - add_subdirectory(nlohmann_json) -endif() -... -``` - -`thirdparty/nlohmann_json` is then a complete copy of this source tree. - -### Package Managers - -:beer: If you are using OS X and [Homebrew](https://brew.sh), just type `brew tap nlohmann/json` and `brew install nlohmann-json` and you're set. If you want the bleeding edge rather than the latest release, use `brew install nlohmann-json --HEAD`. - -If you are using the [Meson Build System](https://mesonbuild.com), add this source tree as a [meson subproject](https://mesonbuild.com/Subprojects.html#using-a-subproject). You may also use the `include.zip` published in this project's [Releases](https://github.com/nlohmann/json/releases) to reduce the size of the vendored source tree. Alternatively, you can get a wrap file by downloading it from [Meson WrapDB](https://wrapdb.mesonbuild.com/nlohmann_json), or simply use `meson wrap install nlohmann_json`. Please see the meson project for any issues regarding the packaging. - -The provided meson.build can also be used as an alternative to cmake for installing `nlohmann_json` system-wide in which case a pkg-config file is installed. To use it, simply have your build system require the `nlohmann_json` pkg-config dependency. In Meson, it is preferred to use the [`dependency()`](https://mesonbuild.com/Reference-manual.html#dependency) object with a subproject fallback, rather than using the subproject directly. - -If you are using [Conan](https://www.conan.io/) to manage your dependencies, merely add `nlohmann_json/x.y.z` to your `conanfile`'s requires, where `x.y.z` is the release version you want to use. 
Please file issues [here](https://github.com/conan-io/conan-center-index/issues) if you experience problems with the packages. - -If you are using [Spack](https://www.spack.io/) to manage your dependencies, you can use the [`nlohmann-json` package](https://spack.readthedocs.io/en/latest/package_list.html#nlohmann-json). Please see the [spack project](https://github.com/spack/spack) for any issues regarding the packaging. - -If you are using [hunter](https://github.com/cpp-pm/hunter) on your project for external dependencies, then you can use the [nlohmann_json package](https://hunter.readthedocs.io/en/latest/packages/pkg/nlohmann_json.html). Please see the hunter project for any issues regarding the packaging. - -If you are using [Buckaroo](https://buckaroo.pm), you can install this library's module with `buckaroo add github.com/buckaroo-pm/nlohmann-json`. Please file issues [here](https://github.com/buckaroo-pm/nlohmann-json). There is a demo repo [here](https://github.com/njlr/buckaroo-nholmann-json-example). - -If you are using [vcpkg](https://github.com/Microsoft/vcpkg/) on your project for external dependencies, then you can use the [nlohmann-json package](https://github.com/Microsoft/vcpkg/tree/master/ports/nlohmann-json). Please see the vcpkg project for any issues regarding the packaging. - -If you are using [cget](https://cget.readthedocs.io/en/latest/), you can install the latest development version with `cget install nlohmann/json`. A specific version can be installed with `cget install nlohmann/json@v3.1.0`. Also, the multiple header version can be installed by adding the `-DJSON_MultipleHeaders=ON` flag (i.e., `cget install nlohmann/json -DJSON_MultipleHeaders=ON`). - -If you are using [CocoaPods](https://cocoapods.org), you can use the library by adding pod `"nlohmann_json", '~>3.1.2'` to your podfile (see [an example](https://bitbucket.org/benman/nlohmann_json-cocoapod/src/master/)). 
Please file issues [here](https://bitbucket.org/benman/nlohmann_json-cocoapod/issues?status=new&status=open). - -If you are using [NuGet](https://www.nuget.org), you can use the package [nlohmann.json](https://www.nuget.org/packages/nlohmann.json/). Please check [this extensive description](https://github.com/nlohmann/json/issues/1132#issuecomment-452250255) on how to use the package. Please file issues [here](https://github.com/hnkb/nlohmann-json-nuget/issues). - -If you are using [conda](https://conda.io/), you can use the package [nlohmann_json](https://github.com/conda-forge/nlohmann_json-feedstock) from [conda-forge](https://conda-forge.org) executing `conda install -c conda-forge nlohmann_json`. Please file issues [here](https://github.com/conda-forge/nlohmann_json-feedstock/issues). - -If you are using [MSYS2](https://www.msys2.org/), you can use the [mingw-w64-nlohmann-json](https://packages.msys2.org/base/mingw-w64-nlohmann-json) package, just type `pacman -S mingw-w64-i686-nlohmann-json` or `pacman -S mingw-w64-x86_64-nlohmann-json` for installation. Please file issues [here](https://github.com/msys2/MINGW-packages/issues/new?title=%5Bnlohmann-json%5D) if you experience problems with the packages. - -If you are using [`build2`](https://build2.org), you can use the [`nlohmann-json`](https://cppget.org/nlohmann-json) package from the public repository https://cppget.org or directly from the [package's sources repository](https://github.com/build2-packaging/nlohmann-json). In your project's `manifest` file, just add `depends: nlohmann-json` (probably with some [version constraints](https://build2.org/build2-toolchain/doc/build2-toolchain-intro.xhtml#guide-add-remove-deps)). If you are not familiar with using dependencies in `build2`, [please read this introduction](https://build2.org/build2-toolchain/doc/build2-toolchain-intro.xhtml). -Please file issues [here](https://github.com/build2-packaging/nlohmann-json) if you experience problems with the packages. 
- -If you are using [`wsjcpp`](https://wsjcpp.org), you can use the command `wsjcpp install "https://github.com/nlohmann/json:develop"` to get the latest version. Note you can change the branch ":develop" to an existing tag or another branch. - -If you are using [`CPM.cmake`](https://github.com/TheLartians/CPM.cmake), you can check this [`example`](https://github.com/TheLartians/CPM.cmake/tree/master/examples/json). After [adding CPM script](https://github.com/TheLartians/CPM.cmake#adding-cpm) to your project, implement the following snippet to your CMake: - -```cmake -CPMAddPackage( - NAME nlohmann_json - GITHUB_REPOSITORY nlohmann/json - VERSION 3.9.1) -``` - -### Pkg-config - -If you are using bare Makefiles, you can use `pkg-config` to generate the include flags that point to where the library is installed: - -```sh -pkg-config nlohmann_json --cflags -``` - -Users of the Meson build system will also be able to use a system wide library, which will be found by `pkg-config`: - -```meson -json = dependency('nlohmann_json', required: true) -``` - -## Examples - -Beside the examples below, you may want to check the [documentation](https://nlohmann.github.io/json/) where each function contains a separate code example (e.g., check out [`emplace()`](https://nlohmann.github.io/json/api/basic_json/emplace/)). All [example files](https://github.com/nlohmann/json/tree/develop/doc/examples) can be compiled and executed on their own (e.g., file [emplace.cpp](https://github.com/nlohmann/json/blob/develop/doc/examples/emplace.cpp)). - -### JSON as first-class data type - -Here are some examples to give you an idea how to use the class. 
- -Assume you want to create the JSON object - -```json -{ - "pi": 3.141, - "happy": true, - "name": "Niels", - "nothing": null, - "answer": { - "everything": 42 - }, - "list": [1, 0, 2], - "object": { - "currency": "USD", - "value": 42.99 - } -} -``` - -With this library, you could write: - -```cpp -// create an empty structure (null) -json j; - -// add a number that is stored as double (note the implicit conversion of j to an object) -j["pi"] = 3.141; - -// add a Boolean that is stored as bool -j["happy"] = true; - -// add a string that is stored as std::string -j["name"] = "Niels"; - -// add another null object by passing nullptr -j["nothing"] = nullptr; - -// add an object inside the object -j["answer"]["everything"] = 42; - -// add an array that is stored as std::vector (using an initializer list) -j["list"] = { 1, 0, 2 }; - -// add another object (using an initializer list of pairs) -j["object"] = { {"currency", "USD"}, {"value", 42.99} }; - -// instead, you could also write (which looks very similar to the JSON above) -json j2 = { - {"pi", 3.141}, - {"happy", true}, - {"name", "Niels"}, - {"nothing", nullptr}, - {"answer", { - {"everything", 42} - }}, - {"list", {1, 0, 2}}, - {"object", { - {"currency", "USD"}, - {"value", 42.99} - }} -}; -``` - -Note that in all these cases, you never need to "tell" the compiler which JSON value type you want to use. 
If you want to be explicit or express some edge cases, the functions [`json::array()`](https://nlohmann.github.io/json/api/basic_json/array/) and [`json::object()`](https://nlohmann.github.io/json/api/basic_json/object/) will help: - -```cpp -// a way to express the empty array [] -json empty_array_explicit = json::array(); - -// ways to express the empty object {} -json empty_object_implicit = json({}); -json empty_object_explicit = json::object(); - -// a way to express an _array_ of key/value pairs [["currency", "USD"], ["value", 42.99]] -json array_not_object = json::array({ {"currency", "USD"}, {"value", 42.99} }); -``` - -### Serialization / Deserialization - -#### To/from strings - -You can create a JSON value (deserialization) by appending `_json` to a string literal: - -```cpp -// create object from string literal -json j = "{ \"happy\": true, \"pi\": 3.141 }"_json; - -// or even nicer with a raw string literal -auto j2 = R"( - { - "happy": true, - "pi": 3.141 - } -)"_json; -``` - -Note that without appending the `_json` suffix, the passed string literal is not parsed, but just used as JSON string value. That is, `json j = "{ \"happy\": true, \"pi\": 3.141 }"` would just store the string `"{ "happy": true, "pi": 3.141 }"` rather than parsing the actual object. 
- -The above example can also be expressed explicitly using [`json::parse()`](https://nlohmann.github.io/json/api/basic_json/parse/): - -```cpp -// parse explicitly -auto j3 = json::parse("{ \"happy\": true, \"pi\": 3.141 }"); -``` - -You can also get a string representation of a JSON value (serialize): - -```cpp -// explicit conversion to string -std::string s = j.dump(); // {"happy":true,"pi":3.141} - -// serialization with pretty printing -// pass in the amount of spaces to indent -std::cout << j.dump(4) << std::endl; -// { -// "happy": true, -// "pi": 3.141 -// } -``` - -Note the difference between serialization and assignment: - -```cpp -// store a string in a JSON value -json j_string = "this is a string"; - -// retrieve the string value -auto cpp_string = j_string.get(); -// retrieve the string value (alternative when a variable already exists) -std::string cpp_string2; -j_string.get_to(cpp_string2); - -// retrieve the serialized value (explicit JSON serialization) -std::string serialized_string = j_string.dump(); - -// output of original string -std::cout << cpp_string << " == " << cpp_string2 << " == " << j_string.get() << '\n'; -// output of serialized value -std::cout << j_string << " == " << serialized_string << std::endl; -``` - -[`.dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) returns the originally stored string value. - -Note the library only supports UTF-8. When you store strings with different encodings in the library, calling [`dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) may throw an exception unless `json::error_handler_t::replace` or `json::error_handler_t::ignore` are used as error handlers. - -#### To/from streams (e.g. 
files, string streams) - -You can also use streams to serialize and deserialize: - -```cpp -// deserialize from standard input -json j; -std::cin >> j; - -// serialize to standard output -std::cout << j; - -// the setw manipulator was overloaded to set the indentation for pretty printing -std::cout << std::setw(4) << j << std::endl; -``` - -These operators work for any subclasses of `std::istream` or `std::ostream`. Here is the same example with files: - -```cpp -// read a JSON file -std::ifstream i("file.json"); -json j; -i >> j; - -// write prettified JSON to another file -std::ofstream o("pretty.json"); -o << std::setw(4) << j << std::endl; -``` - -Please note that setting the exception bit for `failbit` is inappropriate for this use case. It will result in program termination due to the `noexcept` specifier in use. - -#### Read from iterator range - -You can also parse JSON from an iterator range; that is, from any container accessible by iterators whose `value_type` is an integral type of 1, 2 or 4 bytes, which will be interpreted as UTF-8, UTF-16 and UTF-32 respectively. For instance, a `std::vector`, or a `std::list`: - -```cpp -std::vector v = {'t', 'r', 'u', 'e'}; -json j = json::parse(v.begin(), v.end()); -``` - -You may leave the iterators for the range [begin, end): - -```cpp -std::vector v = {'t', 'r', 'u', 'e'}; -json j = json::parse(v); -``` - -#### Custom data source - -Since the parse function accepts arbitrary iterator ranges, you can provide your own data sources by implementing the `LegacyInputIterator` concept. 
- -```cpp -struct MyContainer { - void advance(); - const char& get_current(); -}; - -struct MyIterator { - using difference_type = std::ptrdiff_t; - using value_type = char; - using pointer = const char*; - using reference = const char&; - using iterator_category = std::input_iterator_tag; - - MyIterator& operator++() { - MyContainer.advance(); - return *this; - } - - bool operator!=(const MyIterator& rhs) const { - return rhs.target != target; - } - - reference operator*() const { - return target.get_current(); - } - - MyContainer* target = nullptr; -}; - -MyIterator begin(MyContainer& tgt) { - return MyIterator{&tgt}; -} - -MyIterator end(const MyContainer&) { - return {}; -} - -void foo() { - MyContainer c; - json j = json::parse(c); -} -``` - -#### SAX interface - -The library uses a SAX-like interface with the following functions: - -```cpp -// called when null is parsed -bool null(); - -// called when a boolean is parsed; value is passed -bool boolean(bool val); - -// called when a signed or unsigned integer number is parsed; value is passed -bool number_integer(number_integer_t val); -bool number_unsigned(number_unsigned_t val); - -// called when a floating-point number is parsed; value and original string is passed -bool number_float(number_float_t val, const string_t& s); - -// called when a string is parsed; value is passed and can be safely moved away -bool string(string_t& val); -// called when a binary value is parsed; value is passed and can be safely moved away -bool binary(binary_t& val); - -// called when an object or array begins or ends, resp. 
The number of elements is passed (or -1 if not known) -bool start_object(std::size_t elements); -bool end_object(); -bool start_array(std::size_t elements); -bool end_array(); -// called when an object key is parsed; value is passed and can be safely moved away -bool key(string_t& val); - -// called when a parse error occurs; byte position, the last token, and an exception is passed -bool parse_error(std::size_t position, const std::string& last_token, const detail::exception& ex); -``` - -The return value of each function determines whether parsing should proceed. - -To implement your own SAX handler, proceed as follows: - -1. Implement the SAX interface in a class. You can use class `nlohmann::json_sax` as base class, but you can also use any class where the functions described above are implemented and public. -2. Create an object of your SAX interface class, e.g. `my_sax`. -3. Call `bool json::sax_parse(input, &my_sax)`; where the first parameter can be any input like a string or an input stream and the second parameter is a pointer to your SAX interface. - -Note the `sax_parse` function only returns a `bool` indicating the result of the last executed SAX event. It does not return a `json` value - it is up to you to decide what to do with the SAX events. Furthermore, no exceptions are thrown in case of a parse error - it is up to you what to do with the exception object passed to your `parse_error` implementation. Internally, the SAX interface is used for the DOM parser (class `json_sax_dom_parser`) as well as the acceptor (`json_sax_acceptor`), see file [`json_sax.hpp`](https://github.com/nlohmann/json/blob/develop/include/nlohmann/detail/input/json_sax.hpp). - -### STL-like access - -We designed the JSON class to behave just like an STL container. In fact, it satisfies the [**ReversibleContainer**](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) requirement. 
- -```cpp -// create an array using push_back -json j; -j.push_back("foo"); -j.push_back(1); -j.push_back(true); - -// also use emplace_back -j.emplace_back(1.78); - -// iterate the array -for (json::iterator it = j.begin(); it != j.end(); ++it) { - std::cout << *it << '\n'; -} - -// range-based for -for (auto& element : j) { - std::cout << element << '\n'; -} - -// getter/setter -const auto tmp = j[0].get(); -j[1] = 42; -bool foo = j.at(2); - -// comparison -j == "[\"foo\", 42, true, 1.78]"_json; // true - -// other stuff -j.size(); // 3 entries -j.empty(); // false -j.type(); // json::value_t::array -j.clear(); // the array is empty again - -// convenience type checkers -j.is_null(); -j.is_boolean(); -j.is_number(); -j.is_object(); -j.is_array(); -j.is_string(); - -// create an object -json o; -o["foo"] = 23; -o["bar"] = false; -o["baz"] = 3.141; - -// also use emplace -o.emplace("weather", "sunny"); - -// special iterator member functions for objects -for (json::iterator it = o.begin(); it != o.end(); ++it) { - std::cout << it.key() << " : " << it.value() << "\n"; -} - -// the same code as range for -for (auto& el : o.items()) { - std::cout << el.key() << " : " << el.value() << "\n"; -} - -// even easier with structured bindings (C++17) -for (auto& [key, value] : o.items()) { - std::cout << key << " : " << value << "\n"; -} - -// find an entry -if (o.contains("foo")) { - // there is an entry with key "foo" -} - -// or via find and an iterator -if (o.find("foo") != o.end()) { - // there is an entry with key "foo" -} - -// or simpler using count() -int foo_present = o.count("foo"); // 1 -int fob_present = o.count("fob"); // 0 - -// delete an entry -o.erase("foo"); -``` - - -### Conversion from STL containers - -Any sequence container (`std::array`, `std::vector`, `std::deque`, `std::forward_list`, `std::list`) whose values can be used to construct JSON values (e.g., integers, floating point numbers, Booleans, string types, or again STL containers described in this 
section) can be used to create a JSON array. The same holds for similar associative containers (`std::set`, `std::multiset`, `std::unordered_set`, `std::unordered_multiset`), but in these cases the order of the elements of the array depends on how the elements are ordered in the respective STL container. - -```cpp -std::vector c_vector {1, 2, 3, 4}; -json j_vec(c_vector); -// [1, 2, 3, 4] - -std::deque c_deque {1.2, 2.3, 3.4, 5.6}; -json j_deque(c_deque); -// [1.2, 2.3, 3.4, 5.6] - -std::list c_list {true, true, false, true}; -json j_list(c_list); -// [true, true, false, true] - -std::forward_list c_flist {12345678909876, 23456789098765, 34567890987654, 45678909876543}; -json j_flist(c_flist); -// [12345678909876, 23456789098765, 34567890987654, 45678909876543] - -std::array c_array {{1, 2, 3, 4}}; -json j_array(c_array); -// [1, 2, 3, 4] - -std::set c_set {"one", "two", "three", "four", "one"}; -json j_set(c_set); // only one entry for "one" is used -// ["four", "one", "three", "two"] - -std::unordered_set c_uset {"one", "two", "three", "four", "one"}; -json j_uset(c_uset); // only one entry for "one" is used -// maybe ["two", "three", "four", "one"] - -std::multiset c_mset {"one", "two", "one", "four"}; -json j_mset(c_mset); // both entries for "one" are used -// maybe ["one", "two", "one", "four"] - -std::unordered_multiset c_umset {"one", "two", "one", "four"}; -json j_umset(c_umset); // both entries for "one" are used -// maybe ["one", "two", "one", "four"] -``` - -Likewise, any associative key-value containers (`std::map`, `std::multimap`, `std::unordered_map`, `std::unordered_multimap`) whose keys can construct an `std::string` and whose values can be used to construct JSON values (see examples above) can be used to create a JSON object. Note that in case of multimaps only one key is used in the JSON object and the value depends on the internal order of the STL container. 
- -```cpp -std::map c_map { {"one", 1}, {"two", 2}, {"three", 3} }; -json j_map(c_map); -// {"one": 1, "three": 3, "two": 2 } - -std::unordered_map c_umap { {"one", 1.2}, {"two", 2.3}, {"three", 3.4} }; -json j_umap(c_umap); -// {"one": 1.2, "two": 2.3, "three": 3.4} - -std::multimap c_mmap { {"one", true}, {"two", true}, {"three", false}, {"three", true} }; -json j_mmap(c_mmap); // only one entry for key "three" is used -// maybe {"one": true, "two": true, "three": true} - -std::unordered_multimap c_ummap { {"one", true}, {"two", true}, {"three", false}, {"three", true} }; -json j_ummap(c_ummap); // only one entry for key "three" is used -// maybe {"one": true, "two": true, "three": true} -``` - -### JSON Pointer and JSON Patch - -The library supports **JSON Pointer** ([RFC 6901](https://tools.ietf.org/html/rfc6901)) as alternative means to address structured values. On top of this, **JSON Patch** ([RFC 6902](https://tools.ietf.org/html/rfc6902)) allows to describe differences between two JSON values - effectively allowing patch and diff operations known from Unix. 
- -```cpp -// a JSON value -json j_original = R"({ - "baz": ["one", "two", "three"], - "foo": "bar" -})"_json; - -// access members with a JSON pointer (RFC 6901) -j_original["/baz/1"_json_pointer]; -// "two" - -// a JSON patch (RFC 6902) -json j_patch = R"([ - { "op": "replace", "path": "/baz", "value": "boo" }, - { "op": "add", "path": "/hello", "value": ["world"] }, - { "op": "remove", "path": "/foo"} -])"_json; - -// apply the patch -json j_result = j_original.patch(j_patch); -// { -// "baz": "boo", -// "hello": ["world"] -// } - -// calculate a JSON patch from two JSON values -json::diff(j_result, j_original); -// [ -// { "op":" replace", "path": "/baz", "value": ["one", "two", "three"] }, -// { "op": "remove","path": "/hello" }, -// { "op": "add", "path": "/foo", "value": "bar" } -// ] -``` - -### JSON Merge Patch - -The library supports **JSON Merge Patch** ([RFC 7386](https://tools.ietf.org/html/rfc7386)) as a patch format. Instead of using JSON Pointer (see above) to specify values to be manipulated, it describes the changes using a syntax that closely mimics the document being modified. - -```cpp -// a JSON value -json j_document = R"({ - "a": "b", - "c": { - "d": "e", - "f": "g" - } -})"_json; - -// a patch -json j_patch = R"({ - "a":"z", - "c": { - "f": null - } -})"_json; - -// apply the patch -j_document.merge_patch(j_patch); -// { -// "a": "z", -// "c": { -// "d": "e" -// } -// } -``` - -### Implicit conversions - -Supported types can be implicitly converted to JSON values. - -It is recommended to **NOT USE** implicit conversions **FROM** a JSON value. -You can find more details about this recommendation [here](https://www.github.com/nlohmann/json/issues/958). -You can switch off implicit conversions by defining `JSON_USE_IMPLICIT_CONVERSIONS` to `0` before including the `json.hpp` header. When using CMake, you can also achieve this by setting the option `JSON_ImplicitConversions` to `OFF`. 
- -```cpp -// strings -std::string s1 = "Hello, world!"; -json js = s1; -auto s2 = js.get(); -// NOT RECOMMENDED -std::string s3 = js; -std::string s4; -s4 = js; - -// Booleans -bool b1 = true; -json jb = b1; -auto b2 = jb.get(); -// NOT RECOMMENDED -bool b3 = jb; -bool b4; -b4 = jb; - -// numbers -int i = 42; -json jn = i; -auto f = jn.get(); -// NOT RECOMMENDED -double f2 = jb; -double f3; -f3 = jb; - -// etc. -``` - -Note that `char` types are not automatically converted to JSON strings, but to integer numbers. A conversion to a string must be specified explicitly: - -```cpp -char ch = 'A'; // ASCII value 65 -json j_default = ch; // stores integer number 65 -json j_string = std::string(1, ch); // stores string "A" -``` - -### Arbitrary types conversions - -Every type can be serialized in JSON, not just STL containers and scalar types. Usually, you would do something along those lines: - -```cpp -namespace ns { - // a simple struct to model a person - struct person { - std::string name; - std::string address; - int age; - }; -} - -ns::person p = {"Ned Flanders", "744 Evergreen Terrace", 60}; - -// convert to JSON: copy each value into the JSON object -json j; -j["name"] = p.name; -j["address"] = p.address; -j["age"] = p.age; - -// ... - -// convert from JSON: copy each value from the JSON object -ns::person p { - j["name"].get(), - j["address"].get(), - j["age"].get() -}; -``` - -It works, but that's quite a lot of boilerplate... 
Fortunately, there's a better way: - -```cpp -// create a person -ns::person p {"Ned Flanders", "744 Evergreen Terrace", 60}; - -// conversion: person -> json -json j = p; - -std::cout << j << std::endl; -// {"address":"744 Evergreen Terrace","age":60,"name":"Ned Flanders"} - -// conversion: json -> person -auto p2 = j.get(); - -// that's it -assert(p == p2); -``` - -#### Basic usage - -To make this work with one of your types, you only need to provide two functions: - -```cpp -using nlohmann::json; - -namespace ns { - void to_json(json& j, const person& p) { - j = json{{"name", p.name}, {"address", p.address}, {"age", p.age}}; - } - - void from_json(const json& j, person& p) { - j.at("name").get_to(p.name); - j.at("address").get_to(p.address); - j.at("age").get_to(p.age); - } -} // namespace ns -``` - -That's all! When calling the `json` constructor with your type, your custom `to_json` method will be automatically called. -Likewise, when calling `get()` or `get_to(your_type&)`, the `from_json` method will be called. - -Some important things: - -* Those methods **MUST** be in your type's namespace (which can be the global namespace), or the library will not be able to locate them (in this example, they are in namespace `ns`, where `person` is defined). -* Those methods **MUST** be available (e.g., proper headers must be included) everywhere you use these conversions. Look at [issue 1108](https://github.com/nlohmann/json/issues/1108) for errors that may occur otherwise. -* When using `get()`, `your_type` **MUST** be [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). (There is a way to bypass this requirement described later.) -* In function `from_json`, use function [`at()`](https://nlohmann.github.io/json/api/basic_json/at/) to access the object values rather than `operator[]`. In case a key does not exist, `at` throws an exception that you can handle, whereas `operator[]` exhibits undefined behavior. 
-* You do not need to add serializers or deserializers for STL types like `std::vector`: the library already implements these. - -#### Simplify your life with macros - -If you just want to serialize/deserialize some structs, the `to_json`/`from_json` functions can be a lot of boilerplate. - -There are two macros to make your life easier as long as you (1) want to use a JSON object as serialization and (2) want to use the member variable names as object keys in that object: - -- `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(name, member1, member2, ...)` is to be defined inside of the namespace of the class/struct to create code for. -- `NLOHMANN_DEFINE_TYPE_INTRUSIVE(name, member1, member2, ...)` is to be defined inside of the class/struct to create code for. This macro can also access private members. - -In both macros, the first parameter is the name of the class/struct, and all remaining parameters name the members. - -##### Examples - -The `to_json`/`from_json` functions for the `person` struct above can be created with: - -```cpp -namespace ns { - NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(person, name, address, age) -} -``` - -Here is an example with private members, where `NLOHMANN_DEFINE_TYPE_INTRUSIVE` is needed: - -```cpp -namespace ns { - class address { - private: - std::string street; - int housenumber; - int postcode; - - public: - NLOHMANN_DEFINE_TYPE_INTRUSIVE(address, street, housenumber, postcode) - }; -} -``` - -#### How do I convert third-party types? - -This requires a bit more advanced technique. But first, let's see how this conversion mechanism works: - -The library uses **JSON Serializers** to convert types to json. -The default serializer for `nlohmann::json` is `nlohmann::adl_serializer` (ADL means [Argument-Dependent Lookup](https://en.cppreference.com/w/cpp/language/adl)). 
- -It is implemented like this (simplified): - -```cpp -template -struct adl_serializer { - static void to_json(json& j, const T& value) { - // calls the "to_json" method in T's namespace - } - - static void from_json(const json& j, T& value) { - // same thing, but with the "from_json" method - } -}; -``` - -This serializer works fine when you have control over the type's namespace. However, what about `boost::optional` or `std::filesystem::path` (C++17)? Hijacking the `boost` namespace is pretty bad, and it's illegal to add something other than template specializations to `std`... - -To solve this, you need to add a specialization of `adl_serializer` to the `nlohmann` namespace, here's an example: - -```cpp -// partial specialization (full specialization works too) -namespace nlohmann { - template - struct adl_serializer> { - static void to_json(json& j, const boost::optional& opt) { - if (opt == boost::none) { - j = nullptr; - } else { - j = *opt; // this will call adl_serializer::to_json which will - // find the free function to_json in T's namespace! - } - } - - static void from_json(const json& j, boost::optional& opt) { - if (j.is_null()) { - opt = boost::none; - } else { - opt = j.get(); // same as above, but with - // adl_serializer::from_json - } - } - }; -} -``` - -#### How can I use `get()` for non-default constructible/non-copyable types? - -There is a way, if your type is [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible). 
You will need to specialize the `adl_serializer` as well, but with a special `from_json` overload: - -```cpp -struct move_only_type { - move_only_type() = delete; - move_only_type(int ii): i(ii) {} - move_only_type(const move_only_type&) = delete; - move_only_type(move_only_type&&) = default; - - int i; -}; - -namespace nlohmann { - template <> - struct adl_serializer { - // note: the return type is no longer 'void', and the method only takes - // one argument - static move_only_type from_json(const json& j) { - return {j.get()}; - } - - // Here's the catch! You must provide a to_json method! Otherwise you - // will not be able to convert move_only_type to json, since you fully - // specialized adl_serializer on that type - static void to_json(json& j, move_only_type t) { - j = t.i; - } - }; -} -``` - -#### Can I write my own serializer? (Advanced use) - -Yes. You might want to take a look at [`unit-udt.cpp`](https://github.com/nlohmann/json/blob/develop/test/src/unit-udt.cpp) in the test suite, to see a few examples. - -If you write your own serializer, you'll need to do a few things: - -- use a different `basic_json` alias than `nlohmann::json` (the last template parameter of `basic_json` is the `JSONSerializer`) -- use your `basic_json` alias (or a template parameter) in all your `to_json`/`from_json` methods -- use `nlohmann::to_json` and `nlohmann::from_json` when you need ADL - -Here is an example, without simplifications, that only accepts types with a size <= 32, and uses ADL. 
- -```cpp -// You should use void as a second template argument -// if you don't need compile-time checks on T -template::type> -struct less_than_32_serializer { - template - static void to_json(BasicJsonType& j, T value) { - // we want to use ADL, and call the correct to_json overload - using nlohmann::to_json; // this method is called by adl_serializer, - // this is where the magic happens - to_json(j, value); - } - - template - static void from_json(const BasicJsonType& j, T& value) { - // same thing here - using nlohmann::from_json; - from_json(j, value); - } -}; -``` - -Be **very** careful when reimplementing your serializer, you can stack overflow if you don't pay attention: - -```cpp -template -struct bad_serializer -{ - template - static void to_json(BasicJsonType& j, const T& value) { - // this calls BasicJsonType::json_serializer::to_json(j, value); - // if BasicJsonType::json_serializer == bad_serializer ... oops! - j = value; - } - - template - static void to_json(const BasicJsonType& j, T& value) { - // this calls BasicJsonType::json_serializer::from_json(j, value); - // if BasicJsonType::json_serializer == bad_serializer ... oops! - value = j.template get(); // oops! - } -}; -``` - -### Specializing enum conversion - -By default, enum values are serialized to JSON as integers. In some cases this could result in undesired behavior. If an enum is modified or re-ordered after data has been serialized to JSON, the later de-serialized JSON data may be undefined or a different enum value than was originally intended. 
- -It is possible to more precisely specify how a given enum is mapped to and from JSON as shown below: - -```cpp -// example enum type declaration -enum TaskState { - TS_STOPPED, - TS_RUNNING, - TS_COMPLETED, - TS_INVALID=-1, -}; - -// map TaskState values to JSON as strings -NLOHMANN_JSON_SERIALIZE_ENUM( TaskState, { - {TS_INVALID, nullptr}, - {TS_STOPPED, "stopped"}, - {TS_RUNNING, "running"}, - {TS_COMPLETED, "completed"}, -}) -``` - -The `NLOHMANN_JSON_SERIALIZE_ENUM()` macro declares a set of `to_json()` / `from_json()` functions for type `TaskState` while avoiding repetition and boilerplate serialization code. - -**Usage:** - -```cpp -// enum to JSON as string -json j = TS_STOPPED; -assert(j == "stopped"); - -// json string to enum -json j3 = "running"; -assert(j3.get() == TS_RUNNING); - -// undefined json value to enum (where the first map entry above is the default) -json jPi = 3.14; -assert(jPi.get() == TS_INVALID ); -``` - -Just as in [Arbitrary Type Conversions](#arbitrary-types-conversions) above, -- `NLOHMANN_JSON_SERIALIZE_ENUM()` MUST be declared in your enum type's namespace (which can be the global namespace), or the library will not be able to locate it and it will default to integer serialization. -- It MUST be available (e.g., proper headers must be included) everywhere you use the conversions. - -Other Important points: -- When using `get()`, undefined JSON values will default to the first pair specified in your map. Select this default pair carefully. -- If an enum or JSON value is specified more than once in your map, the first matching occurrence from the top of the map will be returned when converting to or from JSON. - -### Binary formats (BSON, CBOR, MessagePack, and UBJSON) - -Though JSON is a ubiquitous data format, it is not a very compact format suitable for data exchange, for instance over a network. 
Hence, the library supports [BSON](http://bsonspec.org) (Binary JSON), [CBOR](https://cbor.io) (Concise Binary Object Representation), [MessagePack](https://msgpack.org), and [UBJSON](http://ubjson.org) (Universal Binary JSON Specification) to efficiently encode JSON values to byte vectors and to decode such vectors. - -```cpp -// create a JSON value -json j = R"({"compact": true, "schema": 0})"_json; - -// serialize to BSON -std::vector v_bson = json::to_bson(j); - -// 0x1B, 0x00, 0x00, 0x00, 0x08, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x00, 0x01, 0x10, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 - -// roundtrip -json j_from_bson = json::from_bson(v_bson); - -// serialize to CBOR -std::vector v_cbor = json::to_cbor(j); - -// 0xA2, 0x67, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0xF5, 0x66, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00 - -// roundtrip -json j_from_cbor = json::from_cbor(v_cbor); - -// serialize to MessagePack -std::vector v_msgpack = json::to_msgpack(j); - -// 0x82, 0xA7, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0xC3, 0xA6, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x00 - -// roundtrip -json j_from_msgpack = json::from_msgpack(v_msgpack); - -// serialize to UBJSON -std::vector v_ubjson = json::to_ubjson(j); - -// 0x7B, 0x69, 0x07, 0x63, 0x6F, 0x6D, 0x70, 0x61, 0x63, 0x74, 0x54, 0x69, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6D, 0x61, 0x69, 0x00, 0x7D - -// roundtrip -json j_from_ubjson = json::from_ubjson(v_ubjson); -``` - -The library also supports binary types from BSON, CBOR (byte strings), and MessagePack (bin, ext, fixext). They are stored by default as `std::vector` to be processed outside of the library. 
- -```cpp -// CBOR byte string with payload 0xCAFE -std::vector v = {0x42, 0xCA, 0xFE}; - -// read value -json j = json::from_cbor(v); - -// the JSON value has type binary -j.is_binary(); // true - -// get reference to stored binary value -auto& binary = j.get_binary(); - -// the binary value has no subtype (CBOR has no binary subtypes) -binary.has_subtype(); // false - -// access std::vector member functions -binary.size(); // 2 -binary[0]; // 0xCA -binary[1]; // 0xFE - -// set subtype to 0x10 -binary.set_subtype(0x10); - -// serialize to MessagePack -auto cbor = json::to_msgpack(j); // 0xD5 (fixext2), 0x10, 0xCA, 0xFE -``` - - -## Supported compilers - -Though it's 2021 already, the support for C++11 is still a bit sparse. Currently, the following compilers are known to work: - -- GCC 4.8 - 11.0 (and possibly later) -- Clang 3.4 - 11.0 (and possibly later) -- Apple Clang 9.1 - 12.3 (and possibly later) -- Intel C++ Compiler 17.0.2 (and possibly later) -- Microsoft Visual C++ 2015 / Build Tools 14.0.25123.0 (and possibly later) -- Microsoft Visual C++ 2017 / Build Tools 15.5.180.51428 (and possibly later) -- Microsoft Visual C++ 2019 / Build Tools 16.3.1+1def00d3d (and possibly later) - -I would be happy to learn about other compilers/versions. - -Please note: - -- GCC 4.8 has a bug [57824](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57824)): multiline raw strings cannot be the arguments to macros. Don't use multiline raw strings directly in macros with this compiler. -- Android defaults to using very old compilers and C++ libraries. To fix this, add the following to your `Application.mk`. This will switch to the LLVM C++ library, the Clang compiler, and enable C++11 and other features disabled by default. 
- - ``` - APP_STL := c++_shared - NDK_TOOLCHAIN_VERSION := clang3.6 - APP_CPPFLAGS += -frtti -fexceptions - ``` - - The code compiles successfully with [Android NDK](https://developer.android.com/ndk/index.html?hl=ml), Revision 9 - 11 (and possibly later) and [CrystaX's Android NDK](https://www.crystax.net/en/android/ndk) version 10. - -- For GCC running on MinGW or Android SDK, the error `'to_string' is not a member of 'std'` (or similarly, for `strtod` or `strtof`) may occur. Note this is not an issue with the code, but rather with the compiler itself. On Android, see above to build with a newer environment. For MinGW, please refer to [this site](https://tehsausage.com/mingw-to-string) and [this discussion](https://github.com/nlohmann/json/issues/136) for information on how to fix this bug. For Android NDK using `APP_STL := gnustl_static`, please refer to [this discussion](https://github.com/nlohmann/json/issues/219). - -- Unsupported versions of GCC and Clang are rejected by `#error` directives. This can be switched off by defining `JSON_SKIP_UNSUPPORTED_COMPILER_CHECK`. Note that you can expect no support in this case. 
- -The following compilers are currently used in continuous integration at [Travis](https://travis-ci.org/nlohmann/json), [AppVeyor](https://ci.appveyor.com/project/nlohmann/json), [GitHub Actions](https://github.com/nlohmann/json/actions), and [CircleCI](https://circleci.com/gh/nlohmann/json): - -| Compiler | Operating System | CI Provider | -|-------------------------------------------------------------------|--------------------|----------------| -| Apple Clang 10.0.1 (clang-1001.0.46.4); Xcode 10.2.1 | macOS 10.14.4 | Travis | -| Apple Clang 11.0.0 (clang-1100.0.33.12); Xcode 11.2.1 | macOS 10.14.6 | Travis | -| Apple Clang 11.0.3 (clang-1103.0.32.59); Xcode 11.4.1 | macOS 10.15.4 | GitHub Actions | -| Apple Clang 12.0.0 (clang-1200.0.22.7); Xcode 11.4.1 | macOS 10.15.5 | Travis | -| Clang 3.5.0 (3.5.0-4ubuntu2\~trusty2) | Ubuntu 14.04.5 LTS | Travis | -| Clang 3.6.2 (3.6.2-svn240577-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | -| Clang 3.7.1 (3.7.1-svn253571-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | -| Clang 3.8.0 (3.8.0-2ubuntu3\~trusty5) | Ubuntu 14.04.5 LTS | Travis | -| Clang 3.9.1 (3.9.1-4ubuntu3\~14.04.3) | Ubuntu 14.04.5 LTS | Travis | -| Clang 4.0.1 (4.0.1-svn305264-1\~exp1) | Ubuntu 14.04.5 LTS | Travis | -| Clang 5.0.2 (version 5.0.2-svn328729-1\~exp1\~20180509123505.100) | Ubuntu 14.04.5 LTS | Travis | -| Clang 6.0.1 (6.0.1-svn334776-1\~exp1\~20190309042707.121) | Ubuntu 14.04.5 LTS | Travis | -| Clang 7.1.0 (7.1.0-svn353565-1\~exp1\~20190419134007.64) | Ubuntu 14.04.5 LTS | Travis | -| Clang 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~18.04) | Ubuntu 18.04.4 LTS | Travis | -| Clang 9.0.0 (x86_64-pc-windows-msvc) | Windows-10.0.17763 | GitHub Actions | -| Clang 10.0.0 (x86_64-pc-windows-msvc) | Windows-10.0.17763 | GitHub Actions | -| GCC 4.8.5 (Ubuntu 4.8.5-4ubuntu8\~14.04.2) | Ubuntu 14.04.5 LTS | Travis | -| GCC 4.9.4 (Ubuntu 4.9.4-2ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | -| GCC 5.5.0 (Ubuntu 5.5.0-12ubuntu1\~14.04) | Ubuntu 14.04.5 LTS | Travis | -| GCC 
6.3.0 (Debian 6.3.0-18+deb9u1) | Debian 9 | Circle CI | -| GCC 6.5.0 (Ubuntu 6.5.0-2ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | -| GCC 7.3.0 (x86_64-posix-seh-rev0, Built by MinGW-W64 project) | Windows-6.3.9600 | AppVeyor | -| GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~14.04.1) | Ubuntu 14.04.5 LTS | Travis | -| GCC 7.5.0 (Ubuntu 7.5.0-3ubuntu1\~18.04) | Ubuntu 18.04.4 LTS | GitHub Actions | -| GCC 8.4.0 (Ubuntu 8.4.0-1ubuntu1\~14.04) | Ubuntu 14.04.5 LTS | Travis | -| GCC 9.3.0 (Ubuntu 9.3.0-11ubuntu0\~14.04) | Ubuntu 14.04.5 LTS | Travis | -| GCC 10.1.0 (Arch Linux latest) | Arch Linux | Circle CI | -| MSVC 19.0.24241.7 (Build Engine version 14.0.25420.1) | Windows-6.3.9600 | AppVeyor | -| MSVC 19.16.27035.0 (15.9.21+g9802d43bc3 for .NET Framework) | Windows-10.0.14393 | AppVeyor | -| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) | Windows-10.0.17763 | AppVeyor | -| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) | Windows-10.0.17763 | GitHub Actions | -| MSVC 19.25.28614.0 (Build Engine version 16.5.0+d4cbfca49 for .NET Framework) with ClangCL 10.0.0 | Windows-10.0.17763 | GitHub Actions | - -## License - - - -The class is licensed under the [MIT License](http://opensource.org/licenses/MIT): - -Copyright © 2013-2021 [Niels Lohmann](https://nlohmann.me) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -* * * - -The class contains the UTF-8 Decoder from Bjoern Hoehrmann which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright © 2008-2009 [Björn Hoehrmann](https://bjoern.hoehrmann.de/) - -The class contains a slightly modified version of the Grisu2 algorithm from Florian Loitsch which is licensed under the [MIT License](http://opensource.org/licenses/MIT) (see above). Copyright © 2009 [Florian Loitsch](https://florian.loitsch.com/) - -The class contains a copy of [Hedley](https://nemequ.github.io/hedley/) from Evan Nemerson which is licensed as [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/). - -## Contact - -If you have questions regarding the library, I would like to invite you to [open an issue at GitHub](https://github.com/nlohmann/json/issues/new/choose). Please describe your request, problem, or question as detailed as possible, and also mention the version of the library you are using as well as the version of your compiler and operating system. Opening an issue at GitHub allows other users and contributors to this library to collaborate. For instance, I have little experience with MSVC, and most issues in this regard have been solved by a growing community. If you have a look at the [closed issues](https://github.com/nlohmann/json/issues?q=is%3Aissue+is%3Aclosed), you will see that we react quite timely in most cases. - -Only if your request would contain confidential information, please [send me an email](mailto:mail@nlohmann.me). 
For encrypted messages, please use [this key](https://keybase.io/nlohmann/pgp_keys.asc). - -## Security - -[Commits by Niels Lohmann](https://github.com/nlohmann/json/commits) and [releases](https://github.com/nlohmann/json/releases) are signed with this [PGP Key](https://keybase.io/nlohmann/pgp_keys.asc?fingerprint=797167ae41c0a6d9232e48457f3cea63ae251b69). - -## Thanks - -I deeply appreciate the help of the following people. - - - -- [Teemperor](https://github.com/Teemperor) implemented CMake support and lcov integration, realized escape and Unicode handling in the string parser, and fixed the JSON serialization. -- [elliotgoodrich](https://github.com/elliotgoodrich) fixed an issue with double deletion in the iterator classes. -- [kirkshoop](https://github.com/kirkshoop) made the iterators of the class composable to other libraries. -- [wancw](https://github.com/wanwc) fixed a bug that hindered the class to compile with Clang. -- Tomas Åblad found a bug in the iterator implementation. -- [Joshua C. Randall](https://github.com/jrandall) fixed a bug in the floating-point serialization. -- [Aaron Burghardt](https://github.com/aburgh) implemented code to parse streams incrementally. Furthermore, he greatly improved the parser class by allowing the definition of a filter function to discard undesired elements while parsing. -- [Daniel Kopeček](https://github.com/dkopecek) fixed a bug in the compilation with GCC 5.0. -- [Florian Weber](https://github.com/Florianjw) fixed a bug in and improved the performance of the comparison operators. -- [Eric Cornelius](https://github.com/EricMCornelius) pointed out a bug in the handling with NaN and infinity values. He also improved the performance of the string escaping. -- [易思龙](https://github.com/likebeta) implemented a conversion from anonymous enums. -- [kepkin](https://github.com/kepkin) patiently pushed forward the support for Microsoft Visual studio. 
-- [gregmarr](https://github.com/gregmarr) simplified the implementation of reverse iterators and helped with numerous hints and improvements. In particular, he pushed forward the implementation of user-defined types. -- [Caio Luppi](https://github.com/caiovlp) fixed a bug in the Unicode handling. -- [dariomt](https://github.com/dariomt) fixed some typos in the examples. -- [Daniel Frey](https://github.com/d-frey) cleaned up some pointers and implemented exception-safe memory allocation. -- [Colin Hirsch](https://github.com/ColinH) took care of a small namespace issue. -- [Huu Nguyen](https://github.com/whoshuu) correct a variable name in the documentation. -- [Silverweed](https://github.com/silverweed) overloaded `parse()` to accept an rvalue reference. -- [dariomt](https://github.com/dariomt) fixed a subtlety in MSVC type support and implemented the `get_ref()` function to get a reference to stored values. -- [ZahlGraf](https://github.com/ZahlGraf) added a workaround that allows compilation using Android NDK. -- [whackashoe](https://github.com/whackashoe) replaced a function that was marked as unsafe by Visual Studio. -- [406345](https://github.com/406345) fixed two small warnings. -- [Glen Fernandes](https://github.com/glenfe) noted a potential portability problem in the `has_mapped_type` function. -- [Corbin Hughes](https://github.com/nibroc) fixed some typos in the contribution guidelines. -- [twelsby](https://github.com/twelsby) fixed the array subscript operator, an issue that failed the MSVC build, and floating-point parsing/dumping. He further added support for unsigned integer numbers and implemented better roundtrip support for parsed numbers. -- [Volker Diels-Grabsch](https://github.com/vog) fixed a link in the README file. -- [msm-](https://github.com/msm-) added support for American Fuzzy Lop. -- [Annihil](https://github.com/Annihil) fixed an example in the README file. -- [Themercee](https://github.com/Themercee) noted a wrong URL in the README file. 
-- [Lv Zheng](https://github.com/lv-zheng) fixed a namespace issue with `int64_t` and `uint64_t`. -- [abc100m](https://github.com/abc100m) analyzed the issues with GCC 4.8 and proposed a [partial solution](https://github.com/nlohmann/json/pull/212). -- [zewt](https://github.com/zewt) added useful notes to the README file about Android. -- [Róbert Márki](https://github.com/robertmrk) added a fix to use move iterators and improved the integration via CMake. -- [Chris Kitching](https://github.com/ChrisKitching) cleaned up the CMake files. -- [Tom Needham](https://github.com/06needhamt) fixed a subtle bug with MSVC 2015 which was also proposed by [Michael K.](https://github.com/Epidal). -- [Mário Feroldi](https://github.com/thelostt) fixed a small typo. -- [duncanwerner](https://github.com/duncanwerner) found a really embarrassing performance regression in the 2.0.0 release. -- [Damien](https://github.com/dtoma) fixed one of the last conversion warnings. -- [Thomas Braun](https://github.com/t-b) fixed a warning in a test case and adjusted MSVC calls in the CI. -- [Théo DELRIEU](https://github.com/theodelrieu) patiently and constructively oversaw the long way toward [iterator-range parsing](https://github.com/nlohmann/json/issues/290). He also implemented the magic behind the serialization/deserialization of user-defined types and split the single header file into smaller chunks. -- [Stefan](https://github.com/5tefan) fixed a minor issue in the documentation. -- [Vasil Dimov](https://github.com/vasild) fixed the documentation regarding conversions from `std::multiset`. -- [ChristophJud](https://github.com/ChristophJud) overworked the CMake files to ease project inclusion. -- [Vladimir Petrigo](https://github.com/vpetrigo) made a SFINAE hack more readable and added Visual Studio 17 to the build matrix. -- [Denis Andrejew](https://github.com/seeekr) fixed a grammar issue in the README file. 
-- [Pierre-Antoine Lacaze](https://github.com/palacaze) found a subtle bug in the `dump()` function. -- [TurpentineDistillery](https://github.com/TurpentineDistillery) pointed to [`std::locale::classic()`](https://en.cppreference.com/w/cpp/locale/locale/classic) to avoid too much locale joggling, found some nice performance improvements in the parser, improved the benchmarking code, and realized locale-independent number parsing and printing. -- [cgzones](https://github.com/cgzones) had an idea how to fix the Coverity scan. -- [Jared Grubb](https://github.com/jaredgrubb) silenced a nasty documentation warning. -- [Yixin Zhang](https://github.com/qwename) fixed an integer overflow check. -- [Bosswestfalen](https://github.com/Bosswestfalen) merged two iterator classes into a smaller one. -- [Daniel599](https://github.com/Daniel599) helped to get Travis execute the tests with Clang's sanitizers. -- [Jonathan Lee](https://github.com/vjon) fixed an example in the README file. -- [gnzlbg](https://github.com/gnzlbg) supported the implementation of user-defined types. -- [Alexej Harm](https://github.com/qis) helped to get the user-defined types working with Visual Studio. -- [Jared Grubb](https://github.com/jaredgrubb) supported the implementation of user-defined types. -- [EnricoBilla](https://github.com/EnricoBilla) noted a typo in an example. -- [Martin Hořeňovský](https://github.com/horenmar) found a way for a 2x speedup for the compilation time of the test suite. -- [ukhegg](https://github.com/ukhegg) found proposed an improvement for the examples section. -- [rswanson-ihi](https://github.com/rswanson-ihi) noted a typo in the README. -- [Mihai Stan](https://github.com/stanmihai4) fixed a bug in the comparison with `nullptr`s. -- [Tushar Maheshwari](https://github.com/tusharpm) added [cotire](https://github.com/sakra/cotire) support to speed up the compilation. 
-- [TedLyngmo](https://github.com/TedLyngmo) noted a typo in the README, removed unnecessary bit arithmetic, and fixed some `-Weffc++` warnings. -- [Krzysztof Woś](https://github.com/krzysztofwos) made exceptions more visible. -- [ftillier](https://github.com/ftillier) fixed a compiler warning. -- [tinloaf](https://github.com/tinloaf) made sure all pushed warnings are properly popped. -- [Fytch](https://github.com/Fytch) found a bug in the documentation. -- [Jay Sistar](https://github.com/Type1J) implemented a Meson build description. -- [Henry Lee](https://github.com/HenryRLee) fixed a warning in ICC and improved the iterator implementation. -- [Vincent Thiery](https://github.com/vthiery) maintains a package for the Conan package manager. -- [Steffen](https://github.com/koemeet) fixed a potential issue with MSVC and `std::min`. -- [Mike Tzou](https://github.com/Chocobo1) fixed some typos. -- [amrcode](https://github.com/amrcode) noted a misleading documentation about comparison of floats. -- [Oleg Endo](https://github.com/olegendo) reduced the memory consumption by replacing `` with ``. -- [dan-42](https://github.com/dan-42) cleaned up the CMake files to simplify including/reusing of the library. -- [Nikita Ofitserov](https://github.com/himikof) allowed for moving values from initializer lists. -- [Greg Hurrell](https://github.com/wincent) fixed a typo. -- [Dmitry Kukovinets](https://github.com/DmitryKuk) fixed a typo. -- [kbthomp1](https://github.com/kbthomp1) fixed an issue related to the Intel OSX compiler. -- [Markus Werle](https://github.com/daixtrose) fixed a typo. -- [WebProdPP](https://github.com/WebProdPP) fixed a subtle error in a precondition check. -- [Alex](https://github.com/leha-bot) noted an error in a code sample. -- [Tom de Geus](https://github.com/tdegeus) reported some warnings with ICC and helped fixing them. -- [Perry Kundert](https://github.com/pjkundert) simplified reading from input streams. 
-- [Sonu Lohani](https://github.com/sonulohani) fixed a small compilation error. -- [Jamie Seward](https://github.com/jseward) fixed all MSVC warnings. -- [Nate Vargas](https://github.com/eld00d) added a Doxygen tag file. -- [pvleuven](https://github.com/pvleuven) helped fixing a warning in ICC. -- [Pavel](https://github.com/crea7or) helped fixing some warnings in MSVC. -- [Jamie Seward](https://github.com/jseward) avoided unnecessary string copies in `find()` and `count()`. -- [Mitja](https://github.com/Itja) fixed some typos. -- [Jorrit Wronski](https://github.com/jowr) updated the Hunter package links. -- [Matthias Möller](https://github.com/TinyTinni) added a `.natvis` for the MSVC debug view. -- [bogemic](https://github.com/bogemic) fixed some C++17 deprecation warnings. -- [Eren Okka](https://github.com/erengy) fixed some MSVC warnings. -- [abolz](https://github.com/abolz) integrated the Grisu2 algorithm for proper floating-point formatting, allowing more roundtrip checks to succeed. -- [Vadim Evard](https://github.com/Pipeliner) fixed a Markdown issue in the README. -- [zerodefect](https://github.com/zerodefect) fixed a compiler warning. -- [Kert](https://github.com/kaidokert) allowed to template the string type in the serialization and added the possibility to override the exceptional behavior. -- [mark-99](https://github.com/mark-99) helped fixing an ICC error. -- [Patrik Huber](https://github.com/patrikhuber) fixed links in the README file. -- [johnfb](https://github.com/johnfb) found a bug in the implementation of CBOR's indefinite length strings. -- [Paul Fultz II](https://github.com/pfultz2) added a note on the cget package manager. -- [Wilson Lin](https://github.com/wla80) made the integration section of the README more concise. -- [RalfBielig](https://github.com/ralfbielig) detected and fixed a memory leak in the parser callback. -- [agrianius](https://github.com/agrianius) allowed to dump JSON to an alternative string type. 
-- [Kevin Tonon](https://github.com/ktonon) overworked the C++11 compiler checks in CMake. -- [Axel Huebl](https://github.com/ax3l) simplified a CMake check and added support for the [Spack package manager](https://spack.io). -- [Carlos O'Ryan](https://github.com/coryan) fixed a typo. -- [James Upjohn](https://github.com/jammehcow) fixed a version number in the compilers section. -- [Chuck Atkins](https://github.com/chuckatkins) adjusted the CMake files to the CMake packaging guidelines and provided documentation for the CMake integration. -- [Jan Schöppach](https://github.com/dns13) fixed a typo. -- [martin-mfg](https://github.com/martin-mfg) fixed a typo. -- [Matthias Möller](https://github.com/TinyTinni) removed the dependency from `std::stringstream`. -- [agrianius](https://github.com/agrianius) added code to use alternative string implementations. -- [Daniel599](https://github.com/Daniel599) allowed to use more algorithms with the `items()` function. -- [Julius Rakow](https://github.com/jrakow) fixed the Meson include directory and fixed the links to [cppreference.com](cppreference.com). -- [Sonu Lohani](https://github.com/sonulohani) fixed the compilation with MSVC 2015 in debug mode. -- [grembo](https://github.com/grembo) fixed the test suite and re-enabled several test cases. -- [Hyeon Kim](https://github.com/simnalamburt) introduced the macro `JSON_INTERNAL_CATCH` to control the exception handling inside the library. -- [thyu](https://github.com/thyu) fixed a compiler warning. -- [David Guthrie](https://github.com/LEgregius) fixed a subtle compilation error with Clang 3.4.2. -- [Dennis Fischer](https://github.com/dennisfischer) allowed to call `find_package` without installing the library. -- [Hyeon Kim](https://github.com/simnalamburt) fixed an issue with a double macro definition. -- [Ben Berman](https://github.com/rivertam) made some error messages more understandable. 
-- [zakalibit](https://github.com/zakalibit) fixed a compilation problem with the Intel C++ compiler. -- [mandreyel](https://github.com/mandreyel) fixed a compilation problem. -- [Kostiantyn Ponomarenko](https://github.com/koponomarenko) added version and license information to the Meson build file. -- [Henry Schreiner](https://github.com/henryiii) added support for GCC 4.8. -- [knilch](https://github.com/knilch0r) made sure the test suite does not stall when run in the wrong directory. -- [Antonio Borondo](https://github.com/antonioborondo) fixed an MSVC 2017 warning. -- [Dan Gendreau](https://github.com/dgendreau) implemented the `NLOHMANN_JSON_SERIALIZE_ENUM` macro to quickly define a enum/JSON mapping. -- [efp](https://github.com/efp) added line and column information to parse errors. -- [julian-becker](https://github.com/julian-becker) added BSON support. -- [Pratik Chowdhury](https://github.com/pratikpc) added support for structured bindings. -- [David Avedissian](https://github.com/davedissian) added support for Clang 5.0.1 (PS4 version). -- [Jonathan Dumaresq](https://github.com/dumarjo) implemented an input adapter to read from `FILE*`. -- [kjpus](https://github.com/kjpus) fixed a link in the documentation. -- [Manvendra Singh](https://github.com/manu-chroma) fixed a typo in the documentation. -- [ziggurat29](https://github.com/ziggurat29) fixed an MSVC warning. -- [Sylvain Corlay](https://github.com/SylvainCorlay) added code to avoid an issue with MSVC. -- [mefyl](https://github.com/mefyl) fixed a bug when JSON was parsed from an input stream. -- [Millian Poquet](https://github.com/mpoquet) allowed to install the library via Meson. -- [Michael Behrns-Miller](https://github.com/moodboom) found an issue with a missing namespace. -- [Nasztanovics Ferenc](https://github.com/naszta) fixed a compilation issue with libc 2.12. -- [Andreas Schwab](https://github.com/andreas-schwab) fixed the endian conversion. 
-- [Mark-Dunning](https://github.com/Mark-Dunning) fixed a warning in MSVC. -- [Gareth Sylvester-Bradley](https://github.com/garethsb-sony) added `operator/` for JSON Pointers. -- [John-Mark](https://github.com/johnmarkwayve) noted a missing header. -- [Vitaly Zaitsev](https://github.com/xvitaly) fixed compilation with GCC 9.0. -- [Laurent Stacul](https://github.com/stac47) fixed compilation with GCC 9.0. -- [Ivor Wanders](https://github.com/iwanders) helped reducing the CMake requirement to version 3.1. -- [njlr](https://github.com/njlr) updated the Buckaroo instructions. -- [Lion](https://github.com/lieff) fixed a compilation issue with GCC 7 on CentOS. -- [Isaac Nickaein](https://github.com/nickaein) improved the integer serialization performance and implemented the `contains()` function. -- [past-due](https://github.com/past-due) suppressed an unfixable warning. -- [Elvis Oric](https://github.com/elvisoric) improved Meson support. -- [Matěj Plch](https://github.com/Afforix) fixed an example in the README. -- [Mark Beckwith](https://github.com/wythe) fixed a typo. -- [scinart](https://github.com/scinart) fixed bug in the serializer. -- [Patrick Boettcher](https://github.com/pboettch) implemented `push_back()` and `pop_back()` for JSON Pointers. -- [Bruno Oliveira](https://github.com/nicoddemus) added support for Conda. -- [Michele Caini](https://github.com/skypjack) fixed links in the README. -- [Hani](https://github.com/hnkb) documented how to install the library with NuGet. -- [Mark Beckwith](https://github.com/wythe) fixed a typo. -- [yann-morin-1998](https://github.com/yann-morin-1998) helped reducing the CMake requirement to version 3.1. -- [Konstantin Podsvirov](https://github.com/podsvirov) maintains a package for the MSYS2 software distro. -- [remyabel](https://github.com/remyabel) added GNUInstallDirs to the CMake files. -- [Taylor Howard](https://github.com/taylorhoward92) fixed a unit test. 
-- [Gabe Ron](https://github.com/Macr0Nerd) implemented the `to_string` method. -- [Watal M. Iwasaki](https://github.com/heavywatal) fixed a Clang warning. -- [Viktor Kirilov](https://github.com/onqtam) switched the unit tests from [Catch](https://github.com/philsquared/Catch) to [doctest](https://github.com/onqtam/doctest) -- [Juncheng E](https://github.com/ejcjason) fixed a typo. -- [tete17](https://github.com/tete17) fixed a bug in the `contains` function. -- [Xav83](https://github.com/Xav83) fixed some cppcheck warnings. -- [0xflotus](https://github.com/0xflotus) fixed some typos. -- [Christian Deneke](https://github.com/chris0x44) added a const version of `json_pointer::back`. -- [Julien Hamaide](https://github.com/crazyjul) made the `items()` function work with custom string types. -- [Evan Nemerson](https://github.com/nemequ) updated fixed a bug in Hedley and updated this library accordingly. -- [Florian Pigorsch](https://github.com/flopp) fixed a lot of typos. -- [Camille Bégué](https://github.com/cbegue) fixed an issue in the conversion from `std::pair` and `std::tuple` to `json`. -- [Anthony VH](https://github.com/AnthonyVH) fixed a compile error in an enum deserialization. -- [Yuriy Vountesmery](https://github.com/ua-code-dragon) noted a subtle bug in a preprocessor check. -- [Chen](https://github.com/dota17) fixed numerous issues in the library. -- [Antony Kellermann](https://github.com/aokellermann) added a CI step for GCC 10.1. -- [Alex](https://github.com/gistrec) fixed an MSVC warning. -- [Rainer](https://github.com/rvjr) proposed an improvement in the floating-point serialization in CBOR. -- [Francois Chabot](https://github.com/FrancoisChabot) made performance improvements in the input adapters. -- [Arthur Sonzogni](https://github.com/ArthurSonzogni) documented how the library can be included via `FetchContent`. -- [Rimas Misevičius](https://github.com/rmisev) fixed an error message. 
-- [Alexander Myasnikov](https://github.com/alexandermyasnikov) fixed some examples and a link in the README. -- [Hubert Chathi](https://github.com/uhoreg) made CMake's version config file architecture-independent. -- [OmnipotentEntity](https://github.com/OmnipotentEntity) implemented the binary values for CBOR, MessagePack, BSON, and UBJSON. -- [ArtemSarmini](https://github.com/ArtemSarmini) fixed a compilation issue with GCC 10 and fixed a leak. -- [Evgenii Sopov](https://github.com/sea-kg) integrated the library to the wsjcpp package manager. -- [Sergey Linev](https://github.com/linev) fixed a compiler warning. -- [Miguel Magalhães](https://github.com/magamig) fixed the year in the copyright. -- [Gareth Sylvester-Bradley](https://github.com/garethsb-sony) fixed a compilation issue with MSVC. -- [Alexander “weej” Jones](https://github.com/alex-weej) fixed an example in the README. -- [Antoine Cœur](https://github.com/Coeur) fixed some typos in the documentation. -- [jothepro](https://github.com/jothepro) updated links to the Hunter package. -- [Dave Lee](https://github.com/kastiglione) fixed link in the README. -- [Joël Lamotte](https://github.com/Klaim) added instruction for using Build2's package manager. -- [Paul Jurczak](https://github.com/pauljurczak) fixed an example in the README. -- [Sonu Lohani](https://github.com/sonulohani) fixed a warning. -- [Carlos Gomes Martinho](https://github.com/gocarlos) updated the Conan package source. -- [Konstantin Podsvirov](https://github.com/podsvirov) fixed the MSYS2 package documentation. -- [Tridacnid](https://github.com/Tridacnid) improved the CMake tests. -- [Michael](https://github.com/MBalszun) fixed MSVC warnings. -- [Quentin Barbarat](https://github.com/quentin-dev) fixed an example in the documentation. -- [XyFreak](https://github.com/XyFreak) fixed a compiler warning. -- [TotalCaesar659](https://github.com/TotalCaesar659) fixed links in the README. 
-- [Tanuj Garg](https://github.com/tanuj208) improved the fuzzer coverage for UBSAN input. -- [AODQ](https://github.com/AODQ) fixed a compiler warning. -- [jwittbrodt](https://github.com/jwittbrodt) made `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE` inline. -- [pfeatherstone](https://github.com/pfeatherstone) improved the upper bound of arguments of the `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE`/`NLOHMANN_DEFINE_TYPE_INTRUSIVE` macros. -- [Jan Procházka](https://github.com/jprochazk) fixed a bug in the CBOR parser for binary and string values. -- [T0b1-iOS](https://github.com/T0b1-iOS) fixed a bug in the new hash implementation. -- [Matthew Bauer](https://github.com/matthewbauer) adjusted the CBOR writer to create tags for binary subtypes. -- [gatopeich](https://github.com/gatopeich) implemented an ordered map container for `nlohmann::ordered_json`. -- [Érico Nogueira Rolim](https://github.com/ericonr) added support for pkg-config. -- [KonanM](https://github.com/KonanM) proposed an implementation for the `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE`/`NLOHMANN_DEFINE_TYPE_INTRUSIVE` macros. -- [Guillaume Racicot](https://github.com/gracicot) implemented `string_view` support and allowed C++20 support. -- [Alex Reinking](https://github.com/alexreinking) improved CMake support for `FetchContent`. -- [Hannes Domani](https://github.com/ssbssa) provided a GDB pretty printer. - -Thanks a lot for helping out! Please [let me know](mailto:mail@nlohmann.me) if I forgot someone. - - -## Used third-party tools - -The library itself consists of a single header file licensed under the MIT license. However, it is built, tested, documented, and whatnot using a lot of third-party tools and services. Thanks a lot! 
- -- [**amalgamate.py - Amalgamate C source and header files**](https://github.com/edlund/amalgamate) to create a single header file -- [**American fuzzy lop**](https://lcamtuf.coredump.cx/afl/) for fuzz testing -- [**AppVeyor**](https://www.appveyor.com) for [continuous integration](https://ci.appveyor.com/project/nlohmann/json) on Windows -- [**Artistic Style**](http://astyle.sourceforge.net) for automatic source code indentation -- [**CircleCI**](https://circleci.com) for [continuous integration](https://circleci.com/gh/nlohmann/json). -- [**Clang**](https://clang.llvm.org) for compilation with code sanitizers -- [**CMake**](https://cmake.org) for build automation -- [**Codacity**](https://www.codacy.com) for further [code analysis](https://www.codacy.com/app/nlohmann/json) -- [**Coveralls**](https://coveralls.io) to measure [code coverage](https://coveralls.io/github/nlohmann/json) -- [**Coverity Scan**](https://scan.coverity.com) for [static analysis](https://scan.coverity.com/projects/nlohmann-json) -- [**cppcheck**](http://cppcheck.sourceforge.net) for static analysis -- [**doctest**](https://github.com/onqtam/doctest) for the unit tests -- [**Doxygen**](https://www.doxygen.nl/index.html) to generate [documentation](https://nlohmann.github.io/json/doxygen/index.html) -- [**git-update-ghpages**](https://github.com/rstacruz/git-update-ghpages) to upload the documentation to gh-pages -- [**GitHub Changelog Generator**](https://github.com/skywinder/github-changelog-generator) to generate the [ChangeLog](https://github.com/nlohmann/json/blob/develop/ChangeLog.md) -- [**Google Benchmark**](https://github.com/google/benchmark) to implement the benchmarks -- [**Hedley**](https://nemequ.github.io/hedley/) to avoid re-inventing several compiler-agnostic feature macros -- [**lcov**](http://ltp.sourceforge.net/coverage/lcov.php) to process coverage information and create a HTML view -- [**libFuzzer**](https://llvm.org/docs/LibFuzzer.html) to implement fuzz testing for 
OSS-Fuzz -- [**OSS-Fuzz**](https://github.com/google/oss-fuzz) for continuous fuzz testing of the library ([project repository](https://github.com/google/oss-fuzz/tree/master/projects/json)) -- [**Probot**](https://probot.github.io) for automating maintainer tasks such as closing stale issues, requesting missing information, or detecting toxic comments. -- [**send_to_wandbox**](https://github.com/nlohmann/json/blob/develop/doc/scripts/send_to_wandbox.py) to send code examples to [Wandbox](http://melpon.org/wandbox) -- [**Travis**](https://travis-ci.org) for [continuous integration](https://travis-ci.org/nlohmann/json) on Linux and macOS -- [**Valgrind**](https://valgrind.org) to check for correct memory management -- [**Wandbox**](https://wandbox.org) for [online examples](https://wandbox.org/permlink/3lCHrFUZANONKv7a) - - -## Projects using JSON for Modern C++ - -The library is currently used in Apple macOS Sierra and iOS 10. I am not sure what they are using the library for, but I am happy that it runs on so many devices. - - -## Notes - -### Character encoding - -The library supports **Unicode input** as follows: - -- Only **UTF-8** encoded input is supported which is the default encoding for JSON according to [RFC 8259](https://tools.ietf.org/html/rfc8259.html#section-8.1). -- `std::u16string` and `std::u32string` can be parsed, assuming UTF-16 and UTF-32 encoding, respectively. These encodings are not supported when reading from files or other input containers. -- Other encodings such as Latin-1 or ISO 8859-1 are **not** supported and will yield parse or serialization errors. -- [Unicode noncharacters](https://www.unicode.org/faq/private_use.html#nonchar1) will not be replaced by the library. -- Invalid surrogates (e.g., incomplete pairs such as `\uDEAD`) will yield parse errors. -- The strings stored in the library are UTF-8 encoded. 
When using the default string type (`std::string`), note that its length/size functions return the number of stored bytes rather than the number of characters or glyphs. -- When you store strings with different encodings in the library, calling [`dump()`](https://nlohmann.github.io/json/api/basic_json/dump/) may throw an exception unless `json::error_handler_t::replace` or `json::error_handler_t::ignore` are used as error handlers. - -### Comments in JSON - -This library does not support comments by default. It does so for three reasons: - -1. Comments are not part of the [JSON specification](https://tools.ietf.org/html/rfc8259). You may argue that `//` or `/* */` are allowed in JavaScript, but JSON is not JavaScript. -2. This was not an oversight: Douglas Crockford [wrote on this](https://plus.google.com/118095276221607585885/posts/RK8qyGVaGSr) in May 2012: - - > I removed comments from JSON because I saw people were using them to hold parsing directives, a practice which would have destroyed interoperability. I know that the lack of comments makes some people sad, but it shouldn't. - - > Suppose you are using JSON to keep configuration files, which you would like to annotate. Go ahead and insert all the comments you like. Then pipe it through JSMin before handing it to your JSON parser. - -3. It is dangerous for interoperability if some libraries would add comment support while others don't. Please check [The Harmful Consequences of the Robustness Principle](https://tools.ietf.org/html/draft-iab-protocol-maintenance-01) on this. - -However, you can pass set parameter `ignore_comments` to true in the `parse` function to ignore `//` or `/* */` comments. Comments will then be treated as whitespace. - -### Order of object keys - -By default, the library does not preserve the **insertion order of object elements**. 
This is standards-compliant, as the [JSON standard](https://tools.ietf.org/html/rfc8259.html) defines objects as "an unordered collection of zero or more name/value pairs". - -If you do want to preserve the insertion order, you can try the type [`nlohmann::ordered_json`](https://github.com/nlohmann/json/issues/2179). Alternatively, you can use a more sophisticated ordered map like [`tsl::ordered_map`](https://github.com/Tessil/ordered-map) ([integration](https://github.com/nlohmann/json/issues/546#issuecomment-304447518)) or [`nlohmann::fifo_map`](https://github.com/nlohmann/fifo_map) ([integration](https://github.com/nlohmann/json/issues/485#issuecomment-333652309)). - -### Memory Release - -We checked with Valgrind and the Address Sanitizer (ASAN) that there are no memory leaks. - -If you find that a parsing program with this library does not release memory, please consider the following case and it maybe unrelated to this library. - -**Your program is compiled with glibc.** There is a tunable threshold that glibc uses to decide whether to actually return memory to the system or whether to cache it for later reuse. If in your program you make lots of small allocations and those small allocations are not a contiguous block and are presumably below the threshold, then they will not get returned to the OS. -Here is a related issue [#1924](https://github.com/nlohmann/json/issues/1924). - -### Further notes - -- The code contains numerous debug **assertions** which can be switched off by defining the preprocessor macro `NDEBUG`, see the [documentation of `assert`](https://en.cppreference.com/w/cpp/error/assert). In particular, note [`operator[]`](https://nlohmann.github.io/json/api/basic_json/operator%5B%5D/) implements **unchecked access** for const objects: If the given key is not present, the behavior is undefined (think of a dereferenced null pointer) and yields an [assertion failure](https://github.com/nlohmann/json/issues/289) if assertions are switched on. 
If you are not sure whether an element in an object exists, use checked access with the [`at()` function](https://nlohmann.github.io/json/api/basic_json/at/). Furthermore, you can define `JSON_ASSERT(x)` to replace calls to `assert(x)`. -- As the exact type of a number is not defined in the [JSON specification](https://tools.ietf.org/html/rfc8259.html), this library tries to choose the best fitting C++ number type automatically. As a result, the type `double` may be used to store numbers which may yield [**floating-point exceptions**](https://github.com/nlohmann/json/issues/181) in certain rare situations if floating-point exceptions have been unmasked in the calling code. These exceptions are not caused by the library and need to be fixed in the calling code, such as by re-masking the exceptions prior to calling library functions. -- The code can be compiled without C++ **runtime type identification** features; that is, you can use the `-fno-rtti` compiler flag. -- **Exceptions** are used widely within the library. They can, however, be switched off with either using the compiler flag `-fno-exceptions` or by defining the symbol `JSON_NOEXCEPTION`. In this case, exceptions are replaced by `abort()` calls. You can further control this behavior by defining `JSON_THROW_USER` (overriding `throw`), `JSON_TRY_USER` (overriding `try`), and `JSON_CATCH_USER` (overriding `catch`). Note that `JSON_THROW_USER` should leave the current scope (e.g., by throwing or aborting), as continuing after it may yield undefined behavior. - -## Execute unit tests - -To compile and run the tests, you need to execute - -```sh -$ mkdir build -$ cd build -$ cmake .. -DJSON_BuildTests=On -$ cmake --build . -$ ctest --output-on-failure -``` - -Note that during the `ctest` stage, several JSON test files are downloaded from an [external repository](https://github.com/nlohmann/json_test_data). 
If policies forbid downloading artifacts during testing, you can download the files yourself and pass the directory with the test files via `-DJSON_TestDataDirectory=path` to CMake. Then, no Internet connectivity is required. See [issue #2189](https://github.com/nlohmann/json/issues/2189) for more information. - -In case you have downloaded the library rather than checked out the code via Git, test `cmake_fetch_content_configure`. Please execute `ctest -LE git_required` to skip these tests. See [issue #2189](https://github.com/nlohmann/json/issues/2189) for more information. - -Some tests change the installed files and hence make the whole process not reproducible. Please execute `ctest -LE not_reproducible` to skip these tests. See [issue #2324](https://github.com/nlohmann/json/issues/2324) for more information. - -Note you need to call `cmake -LE "not_reproducible|git_required"` to exclude both labels. See [issue #2596](https://github.com/nlohmann/json/issues/2596) for more information. - -As Intel compilers use unsafe floating point optimization by default, the unit tests may fail. Use flag [`/fp:precise`](https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/compiler-options/compiler-option-details/floating-point-options/fp-model-fp.html) then. diff --git a/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp b/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp deleted file mode 100644 index a83971da2..000000000 --- a/cpp/external/katagocoreml/vendor/deps/nlohmann/json.hpp +++ /dev/null @@ -1,25855 +0,0 @@ -/* - __ _____ _____ _____ - __| | __| | | | JSON for Modern C++ -| | |__ | | | | | | version 3.9.1 -|_____|_____|_____|_|___| https://github.com/nlohmann/json - -Licensed under the MIT License . -SPDX-License-Identifier: MIT -Copyright (c) 2013-2019 Niels Lohmann . 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-*/ - -#ifndef INCLUDE_NLOHMANN_JSON_HPP_ -#define INCLUDE_NLOHMANN_JSON_HPP_ - -#define NLOHMANN_JSON_VERSION_MAJOR 3 -#define NLOHMANN_JSON_VERSION_MINOR 9 -#define NLOHMANN_JSON_VERSION_PATCH 1 - -#include // all_of, find, for_each -#include // nullptr_t, ptrdiff_t, size_t -#include // hash, less -#include // initializer_list -#include // istream, ostream -#include // random_access_iterator_tag -#include // unique_ptr -#include // accumulate -#include // string, stoi, to_string -#include // declval, forward, move, pair, swap -#include // vector - -// #include - - -#include - -// #include - - -#include // transform -#include // array -#include // forward_list -#include // inserter, front_inserter, end -#include // map -#include // string -#include // tuple, make_tuple -#include // is_arithmetic, is_same, is_enum, underlying_type, is_convertible -#include // unordered_map -#include // pair, declval -#include // valarray - -// #include - - -#include // exception -#include // runtime_error -#include // to_string - -// #include - - -#include // array -#include // size_t -#include // uint8_t -#include // string - -namespace nlohmann -{ -namespace detail -{ -/////////////////////////// -// JSON type enumeration // -/////////////////////////// - -/*! -@brief the JSON type enumeration - -This enumeration collects the different JSON types. It is internally used to -distinguish the stored values, and the functions @ref basic_json::is_null(), -@ref basic_json::is_object(), @ref basic_json::is_array(), -@ref basic_json::is_string(), @ref basic_json::is_boolean(), -@ref basic_json::is_number() (with @ref basic_json::is_number_integer(), -@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()), -@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and -@ref basic_json::is_structured() rely on it. 
- -@note There are three enumeration entries (number_integer, number_unsigned, and -number_float), because the library distinguishes these three types for numbers: -@ref basic_json::number_unsigned_t is used for unsigned integers, -@ref basic_json::number_integer_t is used for signed integers, and -@ref basic_json::number_float_t is used for floating-point numbers or to -approximate integers which do not fit in the limits of their respective type. - -@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON -value with the default value for a given type - -@since version 1.0.0 -*/ -enum class value_t : std::uint8_t -{ - null, ///< null value - object, ///< object (unordered set of name/value pairs) - array, ///< array (ordered collection of values) - string, ///< string value - boolean, ///< boolean value - number_integer, ///< number value (signed integer) - number_unsigned, ///< number value (unsigned integer) - number_float, ///< number value (floating-point) - binary, ///< binary array (ordered collection of bytes) - discarded ///< discarded by the parser callback function -}; - -/*! -@brief comparison operator for JSON types - -Returns an ordering that is similar to Python: -- order: null < boolean < number < object < array < string < binary -- furthermore, each type is not smaller than itself -- discarded values are not comparable -- binary is represented as a b"" string in python and directly comparable to a - string; however, making a binary array directly comparable with a string would - be surprising behavior in a JSON file. 
- -@since version 1.0.0 -*/ -inline bool operator<(const value_t lhs, const value_t rhs) noexcept -{ - static constexpr std::array order = {{ - 0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */, - 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */, - 6 /* binary */ - } - }; - - const auto l_index = static_cast(lhs); - const auto r_index = static_cast(rhs); - return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index]; -} -} // namespace detail -} // namespace nlohmann - -// #include - - -#include -// #include - - -#include // pair -// #include -/* Hedley - https://nemequ.github.io/hedley - * Created by Evan Nemerson - * - * To the extent possible under law, the author(s) have dedicated all - * copyright and related and neighboring rights to this software to - * the public domain worldwide. This software is distributed without - * any warranty. - * - * For details, see . - * SPDX-License-Identifier: CC0-1.0 - */ - -#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15) -#if defined(JSON_HEDLEY_VERSION) - #undef JSON_HEDLEY_VERSION -#endif -#define JSON_HEDLEY_VERSION 15 - -#if defined(JSON_HEDLEY_STRINGIFY_EX) - #undef JSON_HEDLEY_STRINGIFY_EX -#endif -#define JSON_HEDLEY_STRINGIFY_EX(x) #x - -#if defined(JSON_HEDLEY_STRINGIFY) - #undef JSON_HEDLEY_STRINGIFY -#endif -#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x) - -#if defined(JSON_HEDLEY_CONCAT_EX) - #undef JSON_HEDLEY_CONCAT_EX -#endif -#define JSON_HEDLEY_CONCAT_EX(a,b) a##b - -#if defined(JSON_HEDLEY_CONCAT) - #undef JSON_HEDLEY_CONCAT -#endif -#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b) - -#if defined(JSON_HEDLEY_CONCAT3_EX) - #undef JSON_HEDLEY_CONCAT3_EX -#endif -#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c - -#if defined(JSON_HEDLEY_CONCAT3) - #undef JSON_HEDLEY_CONCAT3 -#endif -#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c) - -#if defined(JSON_HEDLEY_VERSION_ENCODE) - #undef 
JSON_HEDLEY_VERSION_ENCODE -#endif -#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision)) - -#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR) - #undef JSON_HEDLEY_VERSION_DECODE_MAJOR -#endif -#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000) - -#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR) - #undef JSON_HEDLEY_VERSION_DECODE_MINOR -#endif -#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000) - -#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION) - #undef JSON_HEDLEY_VERSION_DECODE_REVISION -#endif -#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000) - -#if defined(JSON_HEDLEY_GNUC_VERSION) - #undef JSON_HEDLEY_GNUC_VERSION -#endif -#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__) - #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) -#elif defined(__GNUC__) - #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0) -#endif - -#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK) - #undef JSON_HEDLEY_GNUC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_GNUC_VERSION) - #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_MSVC_VERSION) - #undef JSON_HEDLEY_MSVC_VERSION -#endif -#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) -#elif defined(_MSC_FULL_VER) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) -#elif defined(_MSC_VER) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION 
JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) -#endif - -#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) - #undef JSON_HEDLEY_MSVC_VERSION_CHECK -#endif -#if !defined(JSON_HEDLEY_MSVC_VERSION) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) -#elif defined(_MSC_VER) && (_MSC_VER >= 1400) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) -#elif defined(_MSC_VER) && (_MSC_VER >= 1200) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) -#else - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) -#endif - -#if defined(JSON_HEDLEY_INTEL_VERSION) - #undef JSON_HEDLEY_INTEL_VERSION -#endif -#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) - #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) -#elif defined(__INTEL_COMPILER) && !defined(__ICL) - #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) -#endif - -#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) - #undef JSON_HEDLEY_INTEL_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_INTEL_VERSION) - #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_INTEL_CL_VERSION) - #undef JSON_HEDLEY_INTEL_CL_VERSION -#endif -#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) - #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) -#endif - -#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) - #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK -#endif -#if 
defined(JSON_HEDLEY_INTEL_CL_VERSION) - #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_PGI_VERSION) - #undef JSON_HEDLEY_PGI_VERSION -#endif -#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) - #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) -#endif - -#if defined(JSON_HEDLEY_PGI_VERSION_CHECK) - #undef JSON_HEDLEY_PGI_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_PGI_VERSION) - #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_SUNPRO_VERSION) - #undef JSON_HEDLEY_SUNPRO_VERSION -#endif -#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) -#elif defined(__SUNPRO_C) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) -#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) -#elif defined(__SUNPRO_CC) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) -#endif - -#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) - #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK -#endif -#if 
defined(JSON_HEDLEY_SUNPRO_VERSION) - #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) - #undef JSON_HEDLEY_EMSCRIPTEN_VERSION -#endif -#if defined(__EMSCRIPTEN__) - #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) -#endif - -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) - #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) - #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_ARM_VERSION) - #undef JSON_HEDLEY_ARM_VERSION -#endif -#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) - #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) -#elif defined(__CC_ARM) && defined(__ARMCC_VERSION) - #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) -#endif - -#if defined(JSON_HEDLEY_ARM_VERSION_CHECK) - #undef JSON_HEDLEY_ARM_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_ARM_VERSION) - #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_IBM_VERSION) - #undef JSON_HEDLEY_IBM_VERSION -#endif -#if defined(__ibmxl__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, 
__ibmxl_modification__) -#elif defined(__xlC__) && defined(__xlC_ver__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) -#elif defined(__xlC__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) -#endif - -#if defined(JSON_HEDLEY_IBM_VERSION_CHECK) - #undef JSON_HEDLEY_IBM_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_IBM_VERSION) - #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_VERSION) - #undef JSON_HEDLEY_TI_VERSION -#endif -#if \ - defined(__TI_COMPILER_VERSION__) && \ - ( \ - defined(__TMS470__) || defined(__TI_ARM__) || \ - defined(__MSP430__) || \ - defined(__TMS320C2000__) \ - ) -#if (__TI_COMPILER_VERSION__ >= 16000000) - #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif -#endif - -#if defined(JSON_HEDLEY_TI_VERSION_CHECK) - #undef JSON_HEDLEY_TI_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_VERSION) - #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL2000_VERSION) - #undef JSON_HEDLEY_TI_CL2000_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) - #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL2000_VERSION) - #define 
JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL430_VERSION) - #undef JSON_HEDLEY_TI_CL430_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) - #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL430_VERSION) - #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) - #undef JSON_HEDLEY_TI_ARMCL_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) - #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) - #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) - #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL6X_VERSION) - #undef JSON_HEDLEY_TI_CL6X_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) - #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 
1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL6X_VERSION) - #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL7X_VERSION) - #undef JSON_HEDLEY_TI_CL7X_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) - #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL7X_VERSION) - #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) - #undef JSON_HEDLEY_TI_CLPRU_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) - #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) - #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_CRAY_VERSION) - #undef JSON_HEDLEY_CRAY_VERSION -#endif -#if defined(_CRAYC) - #if defined(_RELEASE_PATCHLEVEL) - #define JSON_HEDLEY_CRAY_VERSION 
JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) - #else - #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) - #endif -#endif - -#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) - #undef JSON_HEDLEY_CRAY_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_CRAY_VERSION) - #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_IAR_VERSION) - #undef JSON_HEDLEY_IAR_VERSION -#endif -#if defined(__IAR_SYSTEMS_ICC__) - #if __VER__ > 1000 - #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) - #else - #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) - #endif -#endif - -#if defined(JSON_HEDLEY_IAR_VERSION_CHECK) - #undef JSON_HEDLEY_IAR_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_IAR_VERSION) - #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TINYC_VERSION) - #undef JSON_HEDLEY_TINYC_VERSION -#endif -#if defined(__TINYC__) - #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) -#endif - -#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK) - #undef JSON_HEDLEY_TINYC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TINYC_VERSION) - #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_DMC_VERSION) - #undef JSON_HEDLEY_DMC_VERSION -#endif -#if defined(__DMC__) - #define 
JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) -#endif - -#if defined(JSON_HEDLEY_DMC_VERSION_CHECK) - #undef JSON_HEDLEY_DMC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_DMC_VERSION) - #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_COMPCERT_VERSION) - #undef JSON_HEDLEY_COMPCERT_VERSION -#endif -#if defined(__COMPCERT_VERSION__) - #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) -#endif - -#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) - #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_COMPCERT_VERSION) - #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_PELLES_VERSION) - #undef JSON_HEDLEY_PELLES_VERSION -#endif -#if defined(__POCC__) - #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) -#endif - -#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) - #undef JSON_HEDLEY_PELLES_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_PELLES_VERSION) - #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_MCST_LCC_VERSION) - #undef JSON_HEDLEY_MCST_LCC_VERSION -#endif -#if defined(__LCC__) && defined(__LCC_MINOR__) - #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) -#endif - -#if 
defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) - #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_MCST_LCC_VERSION) - #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_GCC_VERSION) - #undef JSON_HEDLEY_GCC_VERSION -#endif -#if \ - defined(JSON_HEDLEY_GNUC_VERSION) && \ - !defined(__clang__) && \ - !defined(JSON_HEDLEY_INTEL_VERSION) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_ARM_VERSION) && \ - !defined(JSON_HEDLEY_CRAY_VERSION) && \ - !defined(JSON_HEDLEY_TI_VERSION) && \ - !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ - !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ - !defined(__COMPCERT__) && \ - !defined(JSON_HEDLEY_MCST_LCC_VERSION) - #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION -#endif - -#if defined(JSON_HEDLEY_GCC_VERSION_CHECK) - #undef JSON_HEDLEY_GCC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_GCC_VERSION) - #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_ATTRIBUTE -#endif -#if \ - defined(__has_attribute) && \ - ( \ - (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ - ) -# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) -#else -# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE -#endif -#if defined(__has_attribute) - #define 
JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) -#else - #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE -#endif -#if defined(__has_attribute) - #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) -#else - #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE -#endif -#if \ - defined(__has_cpp_attribute) && \ - defined(__cplusplus) && \ - (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) -#else - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) - #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS -#endif -#if !defined(__cplusplus) || !defined(__has_cpp_attribute) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) -#elif \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_IAR_VERSION) && \ - (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ - (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) -#else - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE -#endif -#if defined(__has_cpp_attribute) && defined(__cplusplus) - #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) -#else - #define 
JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE -#endif -#if defined(__has_cpp_attribute) && defined(__cplusplus) - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) -#else - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_BUILTIN) - #undef JSON_HEDLEY_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) -#else - #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) - #undef JSON_HEDLEY_GNUC_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) -#else - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) - #undef JSON_HEDLEY_GCC_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) -#else - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_FEATURE) - #undef JSON_HEDLEY_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) -#else - #define JSON_HEDLEY_HAS_FEATURE(feature) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) - #undef JSON_HEDLEY_GNUC_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) -#else - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) 
-#endif - -#if defined(JSON_HEDLEY_GCC_HAS_FEATURE) - #undef JSON_HEDLEY_GCC_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) -#else - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_EXTENSION) - #undef JSON_HEDLEY_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) -#else - #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) - #undef JSON_HEDLEY_GNUC_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) -#else - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) - #undef JSON_HEDLEY_GCC_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) -#else - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) -#else - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) -#else - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) 
JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) -#else - #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_WARNING) - #undef JSON_HEDLEY_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) -#else - #define JSON_HEDLEY_HAS_WARNING(warning) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_WARNING) - #undef JSON_HEDLEY_GNUC_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) -#else - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_WARNING) - #undef JSON_HEDLEY_GCC_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) -#else - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - defined(__clang__) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - 
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) - #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_PRAGMA(value) __pragma(value) -#else - #define JSON_HEDLEY_PRAGMA(value) -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) - #undef JSON_HEDLEY_DIAGNOSTIC_PUSH -#endif -#if defined(JSON_HEDLEY_DIAGNOSTIC_POP) - #undef JSON_HEDLEY_DIAGNOSTIC_POP -#endif -#if defined(__clang__) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) - #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) -#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop") -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) - #define 
JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") -#else - #define JSON_HEDLEY_DIAGNOSTIC_PUSH - #define JSON_HEDLEY_DIAGNOSTIC_POP -#endif - -/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for - HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ -#endif -#if defined(__cplusplus) -# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat") -# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions") -# if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions") -# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ - _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ - _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \ - xpr \ - JSON_HEDLEY_DIAGNOSTIC_POP -# else -# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ - _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \ - xpr \ - JSON_HEDLEY_DIAGNOSTIC_POP -# endif -# else -# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \ - xpr \ - JSON_HEDLEY_DIAGNOSTIC_POP -# endif -# endif -#endif -#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x -#endif - -#if defined(JSON_HEDLEY_CONST_CAST) - #undef JSON_HEDLEY_CONST_CAST -#endif -#if defined(__cplusplus) -# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast(expr)) -#elif \ - JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) -# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \ - 
JSON_HEDLEY_DIAGNOSTIC_PUSH \ - JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \ - ((T) (expr)); \ - JSON_HEDLEY_DIAGNOSTIC_POP \ - })) -#else -# define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr)) -#endif - -#if defined(JSON_HEDLEY_REINTERPRET_CAST) - #undef JSON_HEDLEY_REINTERPRET_CAST -#endif -#if defined(__cplusplus) - #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast(expr)) -#else - #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr)) -#endif - -#if defined(JSON_HEDLEY_STATIC_CAST) - #undef JSON_HEDLEY_STATIC_CAST -#endif -#if defined(__cplusplus) - #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast(expr)) -#else - #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr)) -#endif - -#if defined(JSON_HEDLEY_CPP_CAST) - #undef JSON_HEDLEY_CPP_CAST -#endif -#if defined(__cplusplus) -# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast") -# define JSON_HEDLEY_CPP_CAST(T, expr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \ - ((T) (expr)) \ - JSON_HEDLEY_DIAGNOSTIC_POP -# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0) -# define JSON_HEDLEY_CPP_CAST(T, expr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("diag_suppress=Pe137") \ - JSON_HEDLEY_DIAGNOSTIC_POP -# else -# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr)) -# endif -#else -# define JSON_HEDLEY_CPP_CAST(T, expr) (expr) -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) - #define 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") -#elif 
JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS -#endif - -#if 
defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES -#endif - -#if 
defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunused-function") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION -#endif - -#if defined(JSON_HEDLEY_DEPRECATED) - #undef JSON_HEDLEY_DEPRECATED -#endif -#if defined(JSON_HEDLEY_DEPRECATED_FOR) - #undef JSON_HEDLEY_DEPRECATED_FOR -#endif -#if \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) -#elif \ - (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ - 
JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) -#elif defined(__cplusplus) && (__cplusplus >= 201402L) - #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - 
JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") -#else - #define JSON_HEDLEY_DEPRECATED(since) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) -#endif - -#if defined(JSON_HEDLEY_UNAVAILABLE) - #undef JSON_HEDLEY_UNAVAILABLE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) -#else - #define JSON_HEDLEY_UNAVAILABLE(available_since) -#endif - -#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) - #undef JSON_HEDLEY_WARN_UNUSED_RESULT -#endif -#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) - #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) -#elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) - #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) - #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) -#elif defined(_Check_return_) /* SAL */ - #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ -#else - #define JSON_HEDLEY_WARN_UNUSED_RESULT - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) -#endif - -#if defined(JSON_HEDLEY_SENTINEL) - #undef JSON_HEDLEY_SENTINEL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position))) -#else - #define JSON_HEDLEY_SENTINEL(position) -#endif - -#if defined(JSON_HEDLEY_NO_RETURN) - #undef 
JSON_HEDLEY_NO_RETURN -#endif -#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_NO_RETURN __noreturn -#elif \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) -#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L - #define JSON_HEDLEY_NO_RETURN _Noreturn -#elif defined(__cplusplus) && (__cplusplus >= 201103L) - #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") -#elif 
JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) - #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) - #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) -#else - #define JSON_HEDLEY_NO_RETURN -#endif - -#if defined(JSON_HEDLEY_NO_ESCAPE) - #undef JSON_HEDLEY_NO_ESCAPE -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) - #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) -#else - #define JSON_HEDLEY_NO_ESCAPE -#endif - -#if defined(JSON_HEDLEY_UNREACHABLE) - #undef JSON_HEDLEY_UNREACHABLE -#endif -#if defined(JSON_HEDLEY_UNREACHABLE_RETURN) - #undef JSON_HEDLEY_UNREACHABLE_RETURN -#endif -#if defined(JSON_HEDLEY_ASSUME) - #undef JSON_HEDLEY_ASSUME -#endif -#if \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_ASSUME(expr) __assume(expr) -#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) - #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) -#elif \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) - #if defined(__cplusplus) - #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) - #else - #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) - #endif -#endif -#if \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() -#elif defined(JSON_HEDLEY_ASSUME) - #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) -#endif -#if !defined(JSON_HEDLEY_ASSUME) - #if defined(JSON_HEDLEY_UNREACHABLE) - #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 
1 : (JSON_HEDLEY_UNREACHABLE(), 1))) - #else - #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) - #endif -#endif -#if defined(JSON_HEDLEY_UNREACHABLE) - #if \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) - #else - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() - #endif -#else - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) -#endif -#if !defined(JSON_HEDLEY_UNREACHABLE) - #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) -#endif - -JSON_HEDLEY_DIAGNOSTIC_PUSH -#if JSON_HEDLEY_HAS_WARNING("-Wpedantic") - #pragma clang diagnostic ignored "-Wpedantic" -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) - #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" -#endif -#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) - #if defined(__clang__) - #pragma clang diagnostic ignored "-Wvariadic-macros" - #elif defined(JSON_HEDLEY_GCC_VERSION) - #pragma GCC diagnostic ignored "-Wvariadic-macros" - #endif -#endif -#if defined(JSON_HEDLEY_NON_NULL) - #undef JSON_HEDLEY_NON_NULL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) - #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) -#else - #define JSON_HEDLEY_NON_NULL(...) 
-#endif -JSON_HEDLEY_DIAGNOSTIC_POP - -#if defined(JSON_HEDLEY_PRINTF_FORMAT) - #undef JSON_HEDLEY_PRINTF_FORMAT -#endif -#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) -#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) -#else - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) -#endif - -#if defined(JSON_HEDLEY_CONSTEXPR) - 
#undef JSON_HEDLEY_CONSTEXPR -#endif -#if defined(__cplusplus) - #if __cplusplus >= 201103L - #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) - #endif -#endif -#if !defined(JSON_HEDLEY_CONSTEXPR) - #define JSON_HEDLEY_CONSTEXPR -#endif - -#if defined(JSON_HEDLEY_PREDICT) - #undef JSON_HEDLEY_PREDICT -#endif -#if defined(JSON_HEDLEY_LIKELY) - #undef JSON_HEDLEY_LIKELY -#endif -#if defined(JSON_HEDLEY_UNLIKELY) - #undef JSON_HEDLEY_UNLIKELY -#endif -#if defined(JSON_HEDLEY_UNPREDICTABLE) - #undef JSON_HEDLEY_UNPREDICTABLE -#endif -#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) - #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) -#endif -#if \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) -#elif \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ - 
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ - (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ - (__extension__ ({ \ - double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ - })) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ - (__extension__ ({ \ - double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ - })) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) -#else -# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) -# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) -#endif -#if !defined(JSON_HEDLEY_UNPREDICTABLE) - #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) -#endif - -#if defined(JSON_HEDLEY_MALLOC) - #undef JSON_HEDLEY_MALLOC -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ - 
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_MALLOC __declspec(restrict) -#else - #define JSON_HEDLEY_MALLOC -#endif - -#if defined(JSON_HEDLEY_PURE) - #undef JSON_HEDLEY_PURE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && 
defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PURE __attribute__((__pure__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) -# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") -#elif defined(__cplusplus) && \ - ( \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ - ) -# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") -#else -# define JSON_HEDLEY_PURE -#endif - -#if defined(JSON_HEDLEY_CONST) - #undef JSON_HEDLEY_CONST -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_CONST __attribute__((__const__)) -#elif \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define 
JSON_HEDLEY_CONST _Pragma("no_side_effect") -#else - #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE -#endif - -#if defined(JSON_HEDLEY_RESTRICT) - #undef JSON_HEDLEY_RESTRICT -#endif -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT restrict -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - defined(__clang__) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_RESTRICT __restrict -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT _Restrict -#else - #define JSON_HEDLEY_RESTRICT -#endif - -#if defined(JSON_HEDLEY_INLINE) - #undef JSON_HEDLEY_INLINE -#endif -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - (defined(__cplusplus) && (__cplusplus >= 199711L)) - #define JSON_HEDLEY_INLINE inline -#elif \ - defined(JSON_HEDLEY_GCC_VERSION) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) - #define JSON_HEDLEY_INLINE __inline__ -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - 
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_INLINE __inline -#else - #define JSON_HEDLEY_INLINE -#endif - -#if defined(JSON_HEDLEY_ALWAYS_INLINE) - #undef JSON_HEDLEY_ALWAYS_INLINE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) -# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_ALWAYS_INLINE __forceinline -#elif defined(__cplusplus) && \ - ( \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ - ) -# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") -#elif 
JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) -# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") -#else -# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE -#endif - -#if defined(JSON_HEDLEY_NEVER_INLINE) - #undef JSON_HEDLEY_NEVER_INLINE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") -#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) - #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) 
-#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) - #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) -#else - #define JSON_HEDLEY_NEVER_INLINE -#endif - -#if defined(JSON_HEDLEY_PRIVATE) - #undef JSON_HEDLEY_PRIVATE -#endif -#if defined(JSON_HEDLEY_PUBLIC) - #undef JSON_HEDLEY_PUBLIC -#endif -#if defined(JSON_HEDLEY_IMPORT) - #undef JSON_HEDLEY_IMPORT -#endif -#if defined(_WIN32) || defined(__CYGWIN__) -# define JSON_HEDLEY_PRIVATE -# define JSON_HEDLEY_PUBLIC __declspec(dllexport) -# define JSON_HEDLEY_IMPORT __declspec(dllimport) -#else -# if \ - JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - ( \ - defined(__TI_EABI__) && \ - ( \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ - ) \ - ) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) -# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) -# else -# define JSON_HEDLEY_PRIVATE -# define JSON_HEDLEY_PUBLIC -# endif -# define JSON_HEDLEY_IMPORT extern -#endif - -#if defined(JSON_HEDLEY_NO_THROW) - #undef JSON_HEDLEY_NO_THROW -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) - #define JSON_HEDLEY_NO_THROW __declspec(nothrow) -#else - #define JSON_HEDLEY_NO_THROW -#endif - -#if defined(JSON_HEDLEY_FALL_THROUGH) - #undef JSON_HEDLEY_FALL_THROUGH -#endif -#if \ - 
JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__)) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough) - #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]]) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough) - #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]]) -#elif defined(__fallthrough) /* SAL */ - #define JSON_HEDLEY_FALL_THROUGH __fallthrough -#else - #define JSON_HEDLEY_FALL_THROUGH -#endif - -#if defined(JSON_HEDLEY_RETURNS_NON_NULL) - #undef JSON_HEDLEY_RETURNS_NON_NULL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__)) -#elif defined(_Ret_notnull_) /* SAL */ - #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_ -#else - #define JSON_HEDLEY_RETURNS_NON_NULL -#endif - -#if defined(JSON_HEDLEY_ARRAY_PARAM) - #undef JSON_HEDLEY_ARRAY_PARAM -#endif -#if \ - defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \ - !defined(__STDC_NO_VLA__) && \ - !defined(__cplusplus) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_TINYC_VERSION) - #define JSON_HEDLEY_ARRAY_PARAM(name) (name) -#else - #define JSON_HEDLEY_ARRAY_PARAM(name) -#endif - -#if defined(JSON_HEDLEY_IS_CONSTANT) - #undef JSON_HEDLEY_IS_CONSTANT -#endif -#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR) - #undef JSON_HEDLEY_REQUIRE_CONSTEXPR -#endif -/* JSON_HEDLEY_IS_CONSTEXPR_ is for - HEDLEY INTERNAL USE ONLY. API subject to change without notice. 
*/ -#if defined(JSON_HEDLEY_IS_CONSTEXPR_) - #undef JSON_HEDLEY_IS_CONSTEXPR_ -#endif -#if \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr) -#endif -#if !defined(__cplusplus) -# if \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24) -#if defined(__INTPTR_TYPE__) - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*) -#else - #include - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*) -#endif -# elif \ - ( \ - defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ - !defined(JSON_HEDLEY_SUNPRO_VERSION) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_IAR_VERSION)) || \ - (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0) -#if defined(__INTPTR_TYPE__) - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? 
(void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) -#else - #include - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) * 0) : (int*) 0), int*: 1, void*: 0) -#endif -# elif \ - defined(JSON_HEDLEY_GCC_VERSION) || \ - defined(JSON_HEDLEY_INTEL_VERSION) || \ - defined(JSON_HEDLEY_TINYC_VERSION) || \ - defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \ - defined(JSON_HEDLEY_TI_CL2000_VERSION) || \ - defined(JSON_HEDLEY_TI_CL6X_VERSION) || \ - defined(JSON_HEDLEY_TI_CL7X_VERSION) || \ - defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \ - defined(__clang__) -# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \ - sizeof(void) != \ - sizeof(*( \ - 1 ? \ - ((void*) ((expr) * 0L) ) : \ -((struct { char v[sizeof(void) * 2]; } *) 1) \ - ) \ - ) \ - ) -# endif -#endif -#if defined(JSON_HEDLEY_IS_CONSTEXPR_) - #if !defined(JSON_HEDLEY_IS_CONSTANT) - #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr) - #endif - #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? 
(expr) : (-1)) -#else - #if !defined(JSON_HEDLEY_IS_CONSTANT) - #define JSON_HEDLEY_IS_CONSTANT(expr) (0) - #endif - #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr) -#endif - -#if defined(JSON_HEDLEY_BEGIN_C_DECLS) - #undef JSON_HEDLEY_BEGIN_C_DECLS -#endif -#if defined(JSON_HEDLEY_END_C_DECLS) - #undef JSON_HEDLEY_END_C_DECLS -#endif -#if defined(JSON_HEDLEY_C_DECL) - #undef JSON_HEDLEY_C_DECL -#endif -#if defined(__cplusplus) - #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" { - #define JSON_HEDLEY_END_C_DECLS } - #define JSON_HEDLEY_C_DECL extern "C" -#else - #define JSON_HEDLEY_BEGIN_C_DECLS - #define JSON_HEDLEY_END_C_DECLS - #define JSON_HEDLEY_C_DECL -#endif - -#if defined(JSON_HEDLEY_STATIC_ASSERT) - #undef JSON_HEDLEY_STATIC_ASSERT -#endif -#if \ - !defined(__cplusplus) && ( \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ - (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - defined(_Static_assert) \ - ) -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message) -#elif \ - (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ - JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) -#else -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) -#endif - -#if defined(JSON_HEDLEY_NULL) - #undef JSON_HEDLEY_NULL -#endif -#if defined(__cplusplus) - #if __cplusplus >= 201103L - #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr) - #elif defined(NULL) - #define JSON_HEDLEY_NULL NULL - #else - #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0) - #endif -#elif defined(NULL) - #define JSON_HEDLEY_NULL NULL -#else - #define JSON_HEDLEY_NULL ((void*) 0) -#endif - -#if 
defined(JSON_HEDLEY_MESSAGE) - #undef JSON_HEDLEY_MESSAGE -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") -# define JSON_HEDLEY_MESSAGE(msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ - JSON_HEDLEY_PRAGMA(message msg) \ - JSON_HEDLEY_DIAGNOSTIC_POP -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) -#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#else -# define JSON_HEDLEY_MESSAGE(msg) -#endif - -#if defined(JSON_HEDLEY_WARNING) - #undef JSON_HEDLEY_WARNING -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") -# define JSON_HEDLEY_WARNING(msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ - JSON_HEDLEY_PRAGMA(clang warning msg) \ - JSON_HEDLEY_DIAGNOSTIC_POP -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#else -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) -#endif - -#if defined(JSON_HEDLEY_REQUIRE) - #undef JSON_HEDLEY_REQUIRE -#endif -#if defined(JSON_HEDLEY_REQUIRE_MSG) - #undef JSON_HEDLEY_REQUIRE_MSG -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) -# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") -# define JSON_HEDLEY_REQUIRE(expr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ - 
__attribute__((diagnose_if(!(expr), #expr, "error"))) \ - JSON_HEDLEY_DIAGNOSTIC_POP -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ - __attribute__((diagnose_if(!(expr), msg, "error"))) \ - JSON_HEDLEY_DIAGNOSTIC_POP -# else -# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) -# endif -#else -# define JSON_HEDLEY_REQUIRE(expr) -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) -#endif - -#if defined(JSON_HEDLEY_FLAGS) - #undef JSON_HEDLEY_FLAGS -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) - #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) -#else - #define JSON_HEDLEY_FLAGS -#endif - -#if defined(JSON_HEDLEY_FLAGS_CAST) - #undef JSON_HEDLEY_FLAGS_CAST -#endif -#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) -# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("warning(disable:188)") \ - ((T) (expr)); \ - JSON_HEDLEY_DIAGNOSTIC_POP \ - })) -#else -# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) -#endif - -#if defined(JSON_HEDLEY_EMPTY_BASES) - #undef JSON_HEDLEY_EMPTY_BASES -#endif -#if \ - (JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases) -#else - #define JSON_HEDLEY_EMPTY_BASES -#endif - -/* Remaining macros are deprecated. 
*/ - -#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK) - #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK -#endif -#if defined(__clang__) - #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0) -#else - #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN) - #undef JSON_HEDLEY_CLANG_HAS_BUILTIN -#endif -#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin) - -#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE) - #undef JSON_HEDLEY_CLANG_HAS_FEATURE -#endif -#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature) - -#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION) - #undef JSON_HEDLEY_CLANG_HAS_EXTENSION -#endif -#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension) - -#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_WARNING) - #undef JSON_HEDLEY_CLANG_HAS_WARNING -#endif -#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning) - -#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */ - - -// This file contains all internal macro definitions -// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them - -// exclude unsupported compilers -#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) - #if 
defined(__clang__) - #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 - #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 - #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #endif -#endif - -// C++ language standard detection -#if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) - #define JSON_HAS_CPP_20 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 -#elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 -#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) - #define JSON_HAS_CPP_14 -#endif - -// disable float-equal warnings on GCC/clang -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wfloat-equal" -#endif - -// disable documentation warnings on clang -#if defined(__clang__) - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wdocumentation" -#endif - -// allow to disable exceptions -#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) - #define JSON_THROW(exception) throw exception - #define JSON_TRY try - #define JSON_CATCH(exception) catch(exception) - #define JSON_INTERNAL_CATCH(exception) catch(exception) -#else - #include - #define JSON_THROW(exception) std::abort() - #define JSON_TRY if(true) - #define JSON_CATCH(exception) if(false) - #define JSON_INTERNAL_CATCH(exception) if(false) -#endif - -// override exception macros -#if defined(JSON_THROW_USER) - #undef JSON_THROW - #define JSON_THROW 
JSON_THROW_USER -#endif -#if defined(JSON_TRY_USER) - #undef JSON_TRY - #define JSON_TRY JSON_TRY_USER -#endif -#if defined(JSON_CATCH_USER) - #undef JSON_CATCH - #define JSON_CATCH JSON_CATCH_USER - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_CATCH_USER -#endif -#if defined(JSON_INTERNAL_CATCH_USER) - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER -#endif - -// allow to override assert -#if !defined(JSON_ASSERT) - #include // assert - #define JSON_ASSERT(x) assert(x) -#endif - -// allow to access some private functions (needed by the test suite) -#if defined(JSON_TESTS_PRIVATE) - #define JSON_PRIVATE_UNLESS_TESTED public -#else - #define JSON_PRIVATE_UNLESS_TESTED private -#endif - -/*! -@brief macro to briefly define a mapping between an enum and JSON -@def NLOHMANN_JSON_SERIALIZE_ENUM -@since version 3.4.0 -*/ -#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \ - template \ - inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [e](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.first == e; \ - }); \ - j = ((it != std::end(m)) ? it : std::begin(m))->second; \ - } \ - template \ - inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [&j](const std::pair& ej_pair) -> bool \ - { \ - return ej_pair.second == j; \ - }); \ - e = ((it != std::end(m)) ? it : std::begin(m))->first; \ - } - -// Ugly macros to avoid uglier copy-paste when specializing basic_json. They -// may be removed in the future once the class is split. 
- -#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ - template class ObjectType, \ - template class ArrayType, \ - class StringType, class BooleanType, class NumberIntegerType, \ - class NumberUnsignedType, class NumberFloatType, \ - template class AllocatorType, \ - template class JSONSerializer, \ - class BinaryType> - -#define NLOHMANN_BASIC_JSON_TPL \ - basic_json - -// Macros to simplify conversion from/to types - -#define NLOHMANN_JSON_EXPAND( x ) x -#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME -#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ - NLOHMANN_JSON_PASTE64, \ - NLOHMANN_JSON_PASTE63, \ - NLOHMANN_JSON_PASTE62, \ - NLOHMANN_JSON_PASTE61, \ - NLOHMANN_JSON_PASTE60, \ - NLOHMANN_JSON_PASTE59, \ - NLOHMANN_JSON_PASTE58, \ - NLOHMANN_JSON_PASTE57, \ - NLOHMANN_JSON_PASTE56, \ - NLOHMANN_JSON_PASTE55, \ - NLOHMANN_JSON_PASTE54, \ - NLOHMANN_JSON_PASTE53, \ - NLOHMANN_JSON_PASTE52, \ - NLOHMANN_JSON_PASTE51, \ - NLOHMANN_JSON_PASTE50, \ - NLOHMANN_JSON_PASTE49, \ - NLOHMANN_JSON_PASTE48, \ - NLOHMANN_JSON_PASTE47, \ - NLOHMANN_JSON_PASTE46, \ - NLOHMANN_JSON_PASTE45, \ - NLOHMANN_JSON_PASTE44, \ - NLOHMANN_JSON_PASTE43, \ - NLOHMANN_JSON_PASTE42, \ - NLOHMANN_JSON_PASTE41, \ - NLOHMANN_JSON_PASTE40, \ - NLOHMANN_JSON_PASTE39, \ - NLOHMANN_JSON_PASTE38, \ - NLOHMANN_JSON_PASTE37, \ - NLOHMANN_JSON_PASTE36, \ - NLOHMANN_JSON_PASTE35, \ - NLOHMANN_JSON_PASTE34, \ - NLOHMANN_JSON_PASTE33, \ - NLOHMANN_JSON_PASTE32, \ - NLOHMANN_JSON_PASTE31, \ - NLOHMANN_JSON_PASTE30, \ - NLOHMANN_JSON_PASTE29, \ - NLOHMANN_JSON_PASTE28, \ - NLOHMANN_JSON_PASTE27, \ - NLOHMANN_JSON_PASTE26, \ - NLOHMANN_JSON_PASTE25, \ - 
NLOHMANN_JSON_PASTE24, \ - NLOHMANN_JSON_PASTE23, \ - NLOHMANN_JSON_PASTE22, \ - NLOHMANN_JSON_PASTE21, \ - NLOHMANN_JSON_PASTE20, \ - NLOHMANN_JSON_PASTE19, \ - NLOHMANN_JSON_PASTE18, \ - NLOHMANN_JSON_PASTE17, \ - NLOHMANN_JSON_PASTE16, \ - NLOHMANN_JSON_PASTE15, \ - NLOHMANN_JSON_PASTE14, \ - NLOHMANN_JSON_PASTE13, \ - NLOHMANN_JSON_PASTE12, \ - NLOHMANN_JSON_PASTE11, \ - NLOHMANN_JSON_PASTE10, \ - NLOHMANN_JSON_PASTE9, \ - NLOHMANN_JSON_PASTE8, \ - NLOHMANN_JSON_PASTE7, \ - NLOHMANN_JSON_PASTE6, \ - NLOHMANN_JSON_PASTE5, \ - NLOHMANN_JSON_PASTE4, \ - NLOHMANN_JSON_PASTE3, \ - NLOHMANN_JSON_PASTE2, \ - NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) -#define NLOHMANN_JSON_PASTE2(func, v1) func(v1) -#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) -#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) -#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) -#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) -#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) -#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) -#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) -#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) -#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) -#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, 
v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) -#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) -#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) -#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) -#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) -#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) -#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) -#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) -#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) -#define NLOHMANN_JSON_PASTE21(func, 
v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) -#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) -#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) -#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) -#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) -#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) -#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, 
v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) -#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) -#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) -#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) -#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) -#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) -#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, 
v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) -#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) -#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) -#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) -#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) -#define 
NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) -#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) -#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) -#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) -#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, 
v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) -#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) -#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) -#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) -#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, 
v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) -#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) -#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) -#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) -#define 
NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) -#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) -#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) -#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, 
v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) -#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) -#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) -#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, 
v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) -#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) -#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) -#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, 
v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) -#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) -#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) -#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, 
v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) -#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) -#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) - -#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; -#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); - -/*! -@brief macro -@def NLOHMANN_DEFINE_TYPE_INTRUSIVE -@since version 3.9.0 -*/ -#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) 
\ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -/*! -@brief macro -@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE -@since version 3.9.0 -*/ -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#ifndef JSON_USE_IMPLICIT_CONVERSIONS - #define JSON_USE_IMPLICIT_CONVERSIONS 1 -#endif - -#if JSON_USE_IMPLICIT_CONVERSIONS - #define JSON_EXPLICIT -#else - #define JSON_EXPLICIT explicit -#endif - - -namespace nlohmann -{ -namespace detail -{ - -/*! -@brief replace all occurrences of a substring by another string - -@param[in,out] s the string to manipulate; changed so that all - occurrences of @a f are replaced with @a t -@param[in] f the substring to replace with @a t -@param[in] t the string to replace @a f - -@pre The search string @a f must not be empty. **This precondition is -enforced with an assertion.** - -@since version 2.0.0 -*/ -inline void replace_substring(std::string& s, const std::string& f, - const std::string& t) -{ - JSON_ASSERT(!f.empty()); - for (auto pos = s.find(f); // find first occurrence of f - pos != std::string::npos; // make sure f was found - s.replace(pos, f.size(), t), // replace with t, and - pos = s.find(f, pos + t.size())) // find next occurrence of f - {} -} - -/*! - * @brief string escaping as described in RFC 6901 (Sect. 
4) - * @param[in] s string to escape - * @return escaped string - * - * Note the order of escaping "~" to "~0" and "/" to "~1" is important. - */ -inline std::string escape(std::string s) -{ - replace_substring(s, "~", "~0"); - replace_substring(s, "/", "~1"); - return s; -} - -/*! - * @brief string unescaping as described in RFC 6901 (Sect. 4) - * @param[in] s string to unescape - * @return unescaped string - * - * Note the order of escaping "~1" to "/" and "~0" to "~" is important. - */ -static void unescape(std::string& s) -{ - replace_substring(s, "~1", "/"); - replace_substring(s, "~0", "~"); -} - -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // size_t - -namespace nlohmann -{ -namespace detail -{ -/// struct to capture the start position of the current token -struct position_t -{ - /// the total number of characters read - std::size_t chars_read_total = 0; - /// the number of characters read in the current line - std::size_t chars_read_current_line = 0; - /// the number of lines read - std::size_t lines_read = 0; - - /// conversion to size_t to preserve SAX interface - constexpr operator size_t() const - { - return chars_read_total; - } -}; - -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ -//////////////// -// exceptions // -//////////////// - -/*! -@brief general exception of the @ref basic_json class - -This class is an extension of `std::exception` objects with a member @a id for -exception ids. It is used as the base class for all exceptions thrown by the -@ref basic_json class. This class can hence be used as "wildcard" to catch -exceptions. 
- -Subclasses: -- @ref parse_error for exceptions indicating a parse error -- @ref invalid_iterator for exceptions indicating errors with iterators -- @ref type_error for exceptions indicating executing a member function with - a wrong type -- @ref out_of_range for exceptions indicating access out of the defined range -- @ref other_error for exceptions indicating other library errors - -@internal -@note To have nothrow-copy-constructible exceptions, we internally use - `std::runtime_error` which can cope with arbitrary-length error messages. - Intermediate strings are built with static functions and then passed to - the actual constructor. -@endinternal - -@liveexample{The following code shows how arbitrary library exceptions can be -caught.,exception} - -@since version 3.0.0 -*/ -class exception : public std::exception -{ - public: - /// returns the explanatory string - JSON_HEDLEY_RETURNS_NON_NULL - const char* what() const noexcept override - { - return m.what(); - } - - /// the id of the exception - const int id; - - protected: - JSON_HEDLEY_NON_NULL(3) - exception(int id_, const char* what_arg) : id(id_), m(what_arg) {} - - static std::string name(const std::string& ename, int id_) - { - return "[json.exception." + ename + "." 
+ std::to_string(id_) + "] "; - } - - template - static std::string diagnostics(const BasicJsonType& leaf_element) - { -#if JSON_DIAGNOSTICS - std::vector tokens; - for (const auto* current = &leaf_element; current->m_parent != nullptr; current = current->m_parent) - { - switch (current->m_parent->type()) - { - case value_t::array: - { - for (std::size_t i = 0; i < current->m_parent->m_value.array->size(); ++i) - { - if (¤t->m_parent->m_value.array->operator[](i) == current) - { - tokens.emplace_back(std::to_string(i)); - break; - } - } - break; - } - - case value_t::object: - { - for (const auto& element : *current->m_parent->m_value.object) - { - if (&element.second == current) - { - tokens.emplace_back(element.first.c_str()); - break; - } - } - break; - } - - default: // LCOV_EXCL_LINE - break; // LCOV_EXCL_LINE - } - } - - if (tokens.empty()) - { - return ""; - } - - return "(" + std::accumulate(tokens.rbegin(), tokens.rend(), std::string{}, - [](const std::string & a, const std::string & b) - { - return a + "/" + detail::escape(b); - }) + ") "; -#else - return ""; -#endif - } - - private: - /// an exception object as storage for error messages - std::runtime_error m; -}; - -/*! -@brief exception indicating a parse error - -This exception is thrown by the library when a parse error occurs. Parse errors -can occur during the deserialization of JSON text, CBOR, MessagePack, as well -as when using JSON Patch. - -Member @a byte holds the byte index of the last read character in the input -file. - -Exceptions have ids 1xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position. 
-json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point. -json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid. -json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects. -json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors. -json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`. -json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character. -json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences. -json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number. -json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read. 
-json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read. -json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read. -json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet). -json.exception.parse_error.115 | parse error at byte 5: syntax error while parsing UBJSON high-precision number: invalid number text: 1A | A UBJSON high-precision number could not be parsed. - -@note For an input with n bytes, 1 is the index of the first character and n+1 - is the index of the terminating null byte or the end of file. This also - holds true when reading a byte vector (CBOR or MessagePack). - -@liveexample{The following code shows how a `parse_error` exception can be -caught.,parse_error} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class parse_error : public exception -{ - public: - /*! 
- @brief create a parse error exception - @param[in] id_ the id of the exception - @param[in] pos the position where the error occurred (or with - chars_read_total=0 if the position cannot be - determined) - @param[in] what_arg the explanatory string - @return parse_error object - */ - template - static parse_error create(int id_, const position_t& pos, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - position_string(pos) + ": " + exception::diagnostics(context) + what_arg; - return parse_error(id_, pos.chars_read_total, w.c_str()); - } - - template - static parse_error create(int id_, std::size_t byte_, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("parse_error", id_) + "parse error" + - (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") + - ": " + exception::diagnostics(context) + what_arg; - return parse_error(id_, byte_, w.c_str()); - } - - /*! - @brief byte index of the parse error - - The byte index of the last read character in the input file. - - @note For an input with n bytes, 1 is the index of the first character and - n+1 is the index of the terminating null byte or the end of file. - This also holds true when reading a byte vector (CBOR or MessagePack). - */ - const std::size_t byte; - - private: - parse_error(int id_, std::size_t byte_, const char* what_arg) - : exception(id_, what_arg), byte(byte_) {} - - static std::string position_string(const position_t& pos) - { - return " at line " + std::to_string(pos.lines_read + 1) + - ", column " + std::to_string(pos.chars_read_current_line); - } -}; - -/*! -@brief exception indicating errors with iterators - -This exception is thrown if iterators passed to a library function do not match -the expected semantics. - -Exceptions have ids 2xx. 
- -name / id | example message | description ------------------------------------ | --------------- | ------------------------- -json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion. -json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from. -json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid. -json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid. -json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range. 
-json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key. -json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered. -json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid. -json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to. -json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container. -json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered. -json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin(). 
- -@liveexample{The following code shows how an `invalid_iterator` exception can be -caught.,invalid_iterator} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class invalid_iterator : public exception -{ - public: - template - static invalid_iterator create(int id_, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("invalid_iterator", id_) + exception::diagnostics(context) + what_arg; - return invalid_iterator(id_, w.c_str()); - } - - private: - JSON_HEDLEY_NON_NULL(3) - invalid_iterator(int id_, const char* what_arg) - : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating executing a member function with a wrong type - -This exception is thrown in case of a type error; that is, a library function is -executed on a JSON value whose type does not match the expected semantics. - -Exceptions have ids 3xx. - -name / id | example message | description ------------------------------ | --------------- | ------------------------- -json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead. -json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types. 
-json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &. -json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types. -json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types. -json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types. -json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types. -json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types. -json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types. -json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types. -json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types. -json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types. -json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined. 
-json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers. -json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive. -json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. | -json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) | - -@liveexample{The following code shows how a `type_error` exception can be -caught.,type_error} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref out_of_range for exceptions indicating access out of the defined range -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class type_error : public exception -{ - public: - template - static type_error create(int id_, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("type_error", id_) + exception::diagnostics(context) + what_arg; - return type_error(id_, w.c_str()); - } - - private: - JSON_HEDLEY_NON_NULL(3) - type_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating access out of the defined range - -This exception is thrown in case a library function is called on an input -parameter that exceeds the expected range, for instance in case of array -indices or nonexisting object keys. - -Exceptions have ids 4xx. 
- -name / id | example message | description -------------------------------- | --------------- | ------------------------- -json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1. -json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it. -json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object. -json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved. -json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value. -json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF. -json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. (until version 3.8.0) | -json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. 
| -json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string | - -@liveexample{The following code shows how an `out_of_range` exception can be -caught.,out_of_range} - -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref other_error for exceptions indicating other library errors - -@since version 3.0.0 -*/ -class out_of_range : public exception -{ - public: - template - static out_of_range create(int id_, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("out_of_range", id_) + exception::diagnostics(context) + what_arg; - return out_of_range(id_, w.c_str()); - } - - private: - JSON_HEDLEY_NON_NULL(3) - out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; - -/*! -@brief exception indicating other library errors - -This exception is thrown in case of errors that cannot be classified with the -other exception types. - -Exceptions have ids 5xx. - -name / id | example message | description ------------------------------- | --------------- | ------------------------- -json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed. 
- -@sa - @ref exception for the base class of the library exceptions -@sa - @ref parse_error for exceptions indicating a parse error -@sa - @ref invalid_iterator for exceptions indicating errors with iterators -@sa - @ref type_error for exceptions indicating executing a member function with - a wrong type -@sa - @ref out_of_range for exceptions indicating access out of the defined range - -@liveexample{The following code shows how an `other_error` exception can be -caught.,other_error} - -@since version 3.0.0 -*/ -class other_error : public exception -{ - public: - template - static other_error create(int id_, const std::string& what_arg, const BasicJsonType& context) - { - std::string w = exception::name("other_error", id_) + exception::diagnostics(context) + what_arg; - return other_error(id_, w.c_str()); - } - - private: - JSON_HEDLEY_NON_NULL(3) - other_error(int id_, const char* what_arg) : exception(id_, what_arg) {} -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - - -#include // size_t -#include // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type -#include // index_sequence, make_index_sequence, index_sequence_for - -// #include - - -namespace nlohmann -{ -namespace detail -{ - -template -using uncvref_t = typename std::remove_cv::type>::type; - -#ifdef JSON_HAS_CPP_14 - -// the following utilities are natively available in C++14 -using std::enable_if_t; -using std::index_sequence; -using std::make_index_sequence; -using std::index_sequence_for; - -#else - -// alias templates to reduce boilerplate -template -using enable_if_t = typename std::enable_if::type; - -// source: https://stackoverflow.com/a/32223343 -template -struct index_sequence -{ - using type = index_sequence; - using value_type = std::size_t; - static constexpr std::size_t size() noexcept - { - return sizeof...(Ints); - } -}; - -template -struct merge_and_renumber; - 
-template -struct merge_and_renumber, index_sequence> - : index_sequence < I1..., (sizeof...(I1) + I2)... > {}; - -template -struct make_index_sequence - : merge_and_renumber < typename make_index_sequence < N / 2 >::type, - typename make_index_sequence < N - N / 2 >::type > {}; - -template<> struct make_index_sequence<0> : index_sequence<> {}; -template<> struct make_index_sequence<1> : index_sequence<0> {}; - -template -using index_sequence_for = make_index_sequence; - -#endif - -// dispatch utility (taken from ranges-v3) -template struct priority_tag : priority_tag < N - 1 > {}; -template<> struct priority_tag<0> {}; - -// taken from ranges-v3 -template -struct static_const -{ - static constexpr T value{}; -}; - -template -constexpr T static_const::value; - -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // numeric_limits -#include // false_type, is_constructible, is_integral, is_same, true_type -#include // declval -#include // tuple - -// #include - - -#include // random_access_iterator_tag - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template struct make_void -{ - using type = void; -}; -template using void_t = typename make_void::type; -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -struct iterator_types {}; - -template -struct iterator_types < - It, - void_t> -{ - using difference_type = typename It::difference_type; - using value_type = typename It::value_type; - using pointer = typename It::pointer; - using reference = typename It::reference; - using iterator_category = typename It::iterator_category; -}; - -// This is required as some compilers implement std::iterator_traits in a way that -// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341. 
-template -struct iterator_traits -{ -}; - -template -struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types -{ -}; - -template -struct iterator_traits::value>> -{ - using iterator_category = std::random_access_iterator_tag; - using value_type = T; - using difference_type = ptrdiff_t; - using pointer = T*; - using reference = T&; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - - -#include - -// #include - - -// https://en.cppreference.com/w/cpp/experimental/is_detected -namespace nlohmann -{ -namespace detail -{ -struct nonesuch -{ - nonesuch() = delete; - ~nonesuch() = delete; - nonesuch(nonesuch const&) = delete; - nonesuch(nonesuch const&&) = delete; - void operator=(nonesuch const&) = delete; - void operator=(nonesuch&&) = delete; -}; - -template class Op, - class... Args> -struct detector -{ - using value_t = std::false_type; - using type = Default; -}; - -template class Op, class... Args> -struct detector>, Op, Args...> -{ - using value_t = std::true_type; - using type = Op; -}; - -template class Op, class... Args> -using is_detected = typename detector::value_t; - -template class Op, class... Args> -using detected_t = typename detector::type; - -template class Op, class... Args> -using detected_or = detector; - -template class Op, class... Args> -using detected_or_t = typename detected_or::type; - -template class Op, class... Args> -using is_detected_exact = std::is_same>; - -template class Op, class... Args> -using is_detected_convertible = - std::is_convertible, To>; -} // namespace detail -} // namespace nlohmann - -// #include -#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ -#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ - -#include // int64_t, uint64_t -#include // map -#include // allocator -#include // string -#include // vector - -/*! -@brief namespace for Niels Lohmann -@see https://github.com/nlohmann -@since version 1.0.0 -*/ -namespace nlohmann -{ -/*! 
-@brief default JSONSerializer template argument - -This serializer ignores the template arguments and uses ADL -([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) -for serialization. -*/ -template -struct adl_serializer; - -template class ObjectType = - std::map, - template class ArrayType = std::vector, - class StringType = std::string, class BooleanType = bool, - class NumberIntegerType = std::int64_t, - class NumberUnsignedType = std::uint64_t, - class NumberFloatType = double, - template class AllocatorType = std::allocator, - template class JSONSerializer = - adl_serializer, - class BinaryType = std::vector> -class basic_json; - -/*! -@brief JSON Pointer - -A JSON pointer defines a string syntax for identifying a specific value -within a JSON document. It can be used with functions `at` and -`operator[]`. Furthermore, JSON pointers are the base for JSON patches. - -@sa [RFC 6901](https://tools.ietf.org/html/rfc6901) - -@since version 2.0.0 -*/ -template -class json_pointer; - -/*! -@brief default JSON class - -This type is the default specialization of the @ref basic_json class which -uses the standard template types. - -@since version 1.0.0 -*/ -using json = basic_json<>; - -template -struct ordered_map; - -/*! -@brief ordered JSON class - -This type preserves the insertion order of object keys. - -@since version 3.9.0 -*/ -using ordered_json = basic_json; - -} // namespace nlohmann - -#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ - - -namespace nlohmann -{ -/*! -@brief detail namespace with internal helper functions - -This namespace collects functions that should not be exposed, -implementations of some @ref basic_json methods, and meta-programming helpers. - -@since version 2.1.0 -*/ -namespace detail -{ -///////////// -// helpers // -///////////// - -// Note to maintainers: -// -// Every trait in this file expects a non CV-qualified type. -// The only exceptions are in the 'aliases for detected' section -// (i.e. 
those of the form: decltype(T::member_function(std::declval()))) -// -// In this case, T has to be properly CV-qualified to constraint the function arguments -// (e.g. to_json(BasicJsonType&, const T&)) - -template struct is_basic_json : std::false_type {}; - -NLOHMANN_BASIC_JSON_TPL_DECLARATION -struct is_basic_json : std::true_type {}; - -////////////////////// -// json_ref helpers // -////////////////////// - -template -class json_ref; - -template -struct is_json_ref : std::false_type {}; - -template -struct is_json_ref> : std::true_type {}; - -////////////////////////// -// aliases for detected // -////////////////////////// - -template -using mapped_type_t = typename T::mapped_type; - -template -using key_type_t = typename T::key_type; - -template -using value_type_t = typename T::value_type; - -template -using difference_type_t = typename T::difference_type; - -template -using pointer_t = typename T::pointer; - -template -using reference_t = typename T::reference; - -template -using iterator_category_t = typename T::iterator_category; - -template -using iterator_t = typename T::iterator; - -template -using to_json_function = decltype(T::to_json(std::declval()...)); - -template -using from_json_function = decltype(T::from_json(std::declval()...)); - -template -using get_template_function = decltype(std::declval().template get()); - -// trait checking if JSONSerializer::from_json(json const&, udt&) exists -template -struct has_from_json : std::false_type {}; - -// trait checking if j.get is valid -// use this trait instead of std::is_constructible or std::is_convertible, -// both rely on, or make use of implicit conversions, and thus fail when T -// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958) -template -struct is_getable -{ - static constexpr bool value = is_detected::value; -}; - -template -struct has_from_json < BasicJsonType, T, - enable_if_t < !is_basic_json::value >> -{ - using serializer = typename 
BasicJsonType::template json_serializer; - - static constexpr bool value = - is_detected_exact::value; -}; - -// This trait checks if JSONSerializer::from_json(json const&) exists -// this overload is used for non-default-constructible user-defined-types -template -struct has_non_default_from_json : std::false_type {}; - -template -struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> -{ - using serializer = typename BasicJsonType::template json_serializer; - - static constexpr bool value = - is_detected_exact::value; -}; - -// This trait checks if BasicJsonType::json_serializer::to_json exists -// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion. -template -struct has_to_json : std::false_type {}; - -template -struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json::value >> -{ - using serializer = typename BasicJsonType::template json_serializer; - - static constexpr bool value = - is_detected_exact::value; -}; - - -/////////////////// -// is_ functions // -/////////////////// - -template -struct is_iterator_traits : std::false_type {}; - -template -struct is_iterator_traits> -{ - private: - using traits = iterator_traits; - - public: - static constexpr auto value = - is_detected::value && - is_detected::value && - is_detected::value && - is_detected::value && - is_detected::value; -}; - -// The following implementation of is_complete_type is taken from -// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/ -// and is written by Xiang Fan who agreed to using it in this library. 
- -template -struct is_complete_type : std::false_type {}; - -template -struct is_complete_type : std::true_type {}; - -template -struct is_compatible_object_type_impl : std::false_type {}; - -template -struct is_compatible_object_type_impl < - BasicJsonType, CompatibleObjectType, - enable_if_t < is_detected::value&& - is_detected::value >> -{ - using object_t = typename BasicJsonType::object_t; - - // macOS's is_constructible does not play well with nonesuch... - static constexpr bool value = - std::is_constructible::value && - std::is_constructible::value; -}; - -template -struct is_compatible_object_type - : is_compatible_object_type_impl {}; - -template -struct is_constructible_object_type_impl : std::false_type {}; - -template -struct is_constructible_object_type_impl < - BasicJsonType, ConstructibleObjectType, - enable_if_t < is_detected::value&& - is_detected::value >> -{ - using object_t = typename BasicJsonType::object_t; - - static constexpr bool value = - (std::is_default_constructible::value && - (std::is_move_assignable::value || - std::is_copy_assignable::value) && - (std::is_constructible::value && - std::is_same < - typename object_t::mapped_type, - typename ConstructibleObjectType::mapped_type >::value)) || - (has_from_json::value || - has_non_default_from_json < - BasicJsonType, - typename ConstructibleObjectType::mapped_type >::value); -}; - -template -struct is_constructible_object_type - : is_constructible_object_type_impl {}; - -template -struct is_compatible_string_type_impl : std::false_type {}; - -template -struct is_compatible_string_type_impl < - BasicJsonType, CompatibleStringType, - enable_if_t::value >> -{ - static constexpr auto value = - std::is_constructible::value; -}; - -template -struct is_compatible_string_type - : is_compatible_string_type_impl {}; - -template -struct is_constructible_string_type_impl : std::false_type {}; - -template -struct is_constructible_string_type_impl < - BasicJsonType, ConstructibleStringType, - 
enable_if_t::value >> -{ - static constexpr auto value = - std::is_constructible::value; -}; - -template -struct is_constructible_string_type - : is_constructible_string_type_impl {}; - -template -struct is_compatible_array_type_impl : std::false_type {}; - -template -struct is_compatible_array_type_impl < - BasicJsonType, CompatibleArrayType, - enable_if_t < is_detected::value&& - is_detected::value&& -// This is needed because json_reverse_iterator has a ::iterator type... -// Therefore it is detected as a CompatibleArrayType. -// The real fix would be to have an Iterable concept. - !is_iterator_traits < - iterator_traits>::value >> -{ - static constexpr bool value = - std::is_constructible::value; -}; - -template -struct is_compatible_array_type - : is_compatible_array_type_impl {}; - -template -struct is_constructible_array_type_impl : std::false_type {}; - -template -struct is_constructible_array_type_impl < - BasicJsonType, ConstructibleArrayType, - enable_if_t::value >> - : std::true_type {}; - -template -struct is_constructible_array_type_impl < - BasicJsonType, ConstructibleArrayType, - enable_if_t < !std::is_same::value&& - std::is_default_constructible::value&& -(std::is_move_assignable::value || - std::is_copy_assignable::value)&& -is_detected::value&& -is_detected::value&& -is_complete_type < -detected_t>::value >> -{ - static constexpr bool value = - // This is needed because json_reverse_iterator has a ::iterator type, - // furthermore, std::back_insert_iterator (and other iterators) have a - // base class `iterator`... Therefore it is detected as a - // ConstructibleArrayType. The real fix would be to have an Iterable - // concept. 
- !is_iterator_traits>::value && - - (std::is_same::value || - has_from_json::value || - has_non_default_from_json < - BasicJsonType, typename ConstructibleArrayType::value_type >::value); -}; - -template -struct is_constructible_array_type - : is_constructible_array_type_impl {}; - -template -struct is_compatible_integer_type_impl : std::false_type {}; - -template -struct is_compatible_integer_type_impl < - RealIntegerType, CompatibleNumberIntegerType, - enable_if_t < std::is_integral::value&& - std::is_integral::value&& - !std::is_same::value >> -{ - // is there an assert somewhere on overflows? - using RealLimits = std::numeric_limits; - using CompatibleLimits = std::numeric_limits; - - static constexpr auto value = - std::is_constructible::value && - CompatibleLimits::is_integer && - RealLimits::is_signed == CompatibleLimits::is_signed; -}; - -template -struct is_compatible_integer_type - : is_compatible_integer_type_impl {}; - -template -struct is_compatible_type_impl: std::false_type {}; - -template -struct is_compatible_type_impl < - BasicJsonType, CompatibleType, - enable_if_t::value >> -{ - static constexpr bool value = - has_to_json::value; -}; - -template -struct is_compatible_type - : is_compatible_type_impl {}; - -// https://en.cppreference.com/w/cpp/types/conjunction -template struct conjunction : std::true_type { }; -template struct conjunction : B1 { }; -template -struct conjunction -: std::conditional, B1>::type {}; - -template -struct is_constructible_tuple : std::false_type {}; - -template -struct is_constructible_tuple> : conjunction...> {}; -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -void from_json(const BasicJsonType& j, typename std::nullptr_t& n) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_null())) - { - JSON_THROW(type_error::create(302, "type must be null, but is " + std::string(j.type_name()), j)); - } - n = nullptr; -} - -// overloads for basic_json template parameters 
-template < typename BasicJsonType, typename ArithmeticType, - enable_if_t < std::is_arithmetic::value&& - !std::is_same::value, - int > = 0 > -void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val) -{ - switch (static_cast(j)) - { - case value_t::number_unsigned: - { - val = static_cast(*j.template get_ptr()); - break; - } - case value_t::number_integer: - { - val = static_cast(*j.template get_ptr()); - break; - } - case value_t::number_float: - { - val = static_cast(*j.template get_ptr()); - break; - } - - default: - JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()), j)); - } -} - -template -void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_boolean())) - { - JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name()), j)); - } - b = *j.template get_ptr(); -} - -template -void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_string())) - { - JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name()), j)); - } - s = *j.template get_ptr(); -} - -template < - typename BasicJsonType, typename ConstructibleStringType, - enable_if_t < - is_constructible_string_type::value&& - !std::is_same::value, - int > = 0 > -void from_json(const BasicJsonType& j, ConstructibleStringType& s) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_string())) - { - JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name()), j)); - } - - s = *j.template get_ptr(); -} - -template -void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val) -{ - get_arithmetic_value(j, val); -} - -template -void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val) -{ - get_arithmetic_value(j, val); -} - -template -void from_json(const BasicJsonType& j, typename 
BasicJsonType::number_integer_t& val) -{ - get_arithmetic_value(j, val); -} - -template::value, int> = 0> -void from_json(const BasicJsonType& j, EnumType& e) -{ - typename std::underlying_type::type val; - get_arithmetic_value(j, val); - e = static_cast(val); -} - -// forward_list doesn't have an insert method -template::value, int> = 0> -void from_json(const BasicJsonType& j, std::forward_list& l) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); - } - l.clear(); - std::transform(j.rbegin(), j.rend(), - std::front_inserter(l), [](const BasicJsonType & i) - { - return i.template get(); - }); -} - -// valarray doesn't have an insert method -template::value, int> = 0> -void from_json(const BasicJsonType& j, std::valarray& l) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); - } - l.resize(j.size()); - std::transform(j.begin(), j.end(), std::begin(l), - [](const BasicJsonType & elem) - { - return elem.template get(); - }); -} - -template -auto from_json(const BasicJsonType& j, T (&arr)[N]) --> decltype(j.template get(), void()) -{ - for (std::size_t i = 0; i < N; ++i) - { - arr[i] = j.at(i).template get(); - } -} - -template -void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/) -{ - arr = *j.template get_ptr(); -} - -template -auto from_json_array_impl(const BasicJsonType& j, std::array& arr, - priority_tag<2> /*unused*/) --> decltype(j.template get(), void()) -{ - for (std::size_t i = 0; i < N; ++i) - { - arr[i] = j.at(i).template get(); - } -} - -template -auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/) --> decltype( - arr.reserve(std::declval()), - j.template get(), - void()) -{ - using std::end; - - ConstructibleArrayType ret; - 
ret.reserve(j.size()); - std::transform(j.begin(), j.end(), - std::inserter(ret, end(ret)), [](const BasicJsonType & i) - { - // get() returns *this, this won't call a from_json - // method when value_type is BasicJsonType - return i.template get(); - }); - arr = std::move(ret); -} - -template -void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, - priority_tag<0> /*unused*/) -{ - using std::end; - - ConstructibleArrayType ret; - std::transform( - j.begin(), j.end(), std::inserter(ret, end(ret)), - [](const BasicJsonType & i) - { - // get() returns *this, this won't call a from_json - // method when value_type is BasicJsonType - return i.template get(); - }); - arr = std::move(ret); -} - -template < typename BasicJsonType, typename ConstructibleArrayType, - enable_if_t < - is_constructible_array_type::value&& - !is_constructible_object_type::value&& - !is_constructible_string_type::value&& - !std::is_same::value&& - !is_basic_json::value, - int > = 0 > -auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr) --> decltype(from_json_array_impl(j, arr, priority_tag<3> {}), -j.template get(), -void()) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); - } - - from_json_array_impl(j, arr, priority_tag<3> {}); -} - -template -void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_binary())) - { - JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(j.type_name()), j)); - } - - bin = *j.template get_ptr(); -} - -template::value, int> = 0> -void from_json(const BasicJsonType& j, ConstructibleObjectType& obj) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_object())) - { - JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name()), j)); - } - - ConstructibleObjectType ret; - auto inner_object = j.template get_ptr(); - using 
value_type = typename ConstructibleObjectType::value_type; - std::transform( - inner_object->begin(), inner_object->end(), - std::inserter(ret, ret.begin()), - [](typename BasicJsonType::object_t::value_type const & p) - { - return value_type(p.first, p.second.template get()); - }); - obj = std::move(ret); -} - -// overload for arithmetic types, not chosen for basic_json template arguments -// (BooleanType, etc..); note: Is it really necessary to provide explicit -// overloads for boolean_t etc. in case of a custom BooleanType which is not -// an arithmetic type? -template < typename BasicJsonType, typename ArithmeticType, - enable_if_t < - std::is_arithmetic::value&& - !std::is_same::value&& - !std::is_same::value&& - !std::is_same::value&& - !std::is_same::value, - int > = 0 > -void from_json(const BasicJsonType& j, ArithmeticType& val) -{ - switch (static_cast(j)) - { - case value_t::number_unsigned: - { - val = static_cast(*j.template get_ptr()); - break; - } - case value_t::number_integer: - { - val = static_cast(*j.template get_ptr()); - break; - } - case value_t::number_float: - { - val = static_cast(*j.template get_ptr()); - break; - } - case value_t::boolean: - { - val = static_cast(*j.template get_ptr()); - break; - } - - default: - JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name()), j)); - } -} - -template -void from_json(const BasicJsonType& j, std::pair& p) -{ - p = {j.at(0).template get(), j.at(1).template get()}; -} - -template -void from_json_tuple_impl(const BasicJsonType& j, Tuple& t, index_sequence /*unused*/) -{ - t = std::make_tuple(j.at(Idx).template get::type>()...); -} - -template -void from_json(const BasicJsonType& j, std::tuple& t) -{ - from_json_tuple_impl(j, t, index_sequence_for {}); -} - -template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator, - typename = enable_if_t < !std::is_constructible < - typename BasicJsonType::string_t, Key >::value >> 
-void from_json(const BasicJsonType& j, std::map& m) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); - } - m.clear(); - for (const auto& p : j) - { - if (JSON_HEDLEY_UNLIKELY(!p.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name()), j)); - } - m.emplace(p.at(0).template get(), p.at(1).template get()); - } -} - -template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator, - typename = enable_if_t < !std::is_constructible < - typename BasicJsonType::string_t, Key >::value >> -void from_json(const BasicJsonType& j, std::unordered_map& m) -{ - if (JSON_HEDLEY_UNLIKELY(!j.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name()), j)); - } - m.clear(); - for (const auto& p : j) - { - if (JSON_HEDLEY_UNLIKELY(!p.is_array())) - { - JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name()), j)); - } - m.emplace(p.at(0).template get(), p.at(1).template get()); - } -} - -struct from_json_fn -{ - template - auto operator()(const BasicJsonType& j, T& val) const - noexcept(noexcept(from_json(j, val))) - -> decltype(from_json(j, val), void()) - { - return from_json(j, val); - } -}; -} // namespace detail - -/// namespace to hold default `from_json` function -/// to see why this is required: -/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html -namespace -{ -constexpr const auto& from_json = detail::static_const::value; -} // namespace -} // namespace nlohmann - -// #include - - -#include // copy -#include // begin, end -#include // string -#include // tuple, get -#include // is_same, is_constructible, is_floating_point, is_enum, underlying_type -#include // move, forward, declval, pair -#include // valarray -#include // vector - -// #include - - -#include 
// size_t -#include // input_iterator_tag -#include // string, to_string -#include // tuple_size, get, tuple_element - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -void int_to_string( string_type& target, std::size_t value ) -{ - // For ADL - using std::to_string; - target = to_string(value); -} -template class iteration_proxy_value -{ - public: - using difference_type = std::ptrdiff_t; - using value_type = iteration_proxy_value; - using pointer = value_type * ; - using reference = value_type & ; - using iterator_category = std::input_iterator_tag; - using string_type = typename std::remove_cv< typename std::remove_reference().key() ) >::type >::type; - - private: - /// the iterator - IteratorType anchor; - /// an index for arrays (used to create key names) - std::size_t array_index = 0; - /// last stringified array index - mutable std::size_t array_index_last = 0; - /// a string representation of the array index - mutable string_type array_index_str = "0"; - /// an empty string (to return a reference for primitive values) - const string_type empty_str{}; - - public: - explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {} - - /// dereference operator (needed for range-based for) - iteration_proxy_value& operator*() - { - return *this; - } - - /// increment operator (needed for range-based for) - iteration_proxy_value& operator++() - { - ++anchor; - ++array_index; - - return *this; - } - - /// equality operator (needed for InputIterator) - bool operator==(const iteration_proxy_value& o) const - { - return anchor == o.anchor; - } - - /// inequality operator (needed for range-based for) - bool operator!=(const iteration_proxy_value& o) const - { - return anchor != o.anchor; - } - - /// return key of the iterator - const string_type& key() const - { - JSON_ASSERT(anchor.m_object != nullptr); - - switch (anchor.m_object->type()) - { - // use integer array index as key - case value_t::array: - { - if 
(array_index != array_index_last) - { - int_to_string( array_index_str, array_index ); - array_index_last = array_index; - } - return array_index_str; - } - - // use key from the object - case value_t::object: - return anchor.key(); - - // use an empty key for all primitive types - default: - return empty_str; - } - } - - /// return value of the iterator - typename IteratorType::reference value() const - { - return anchor.value(); - } -}; - -/// proxy class for the items() function -template class iteration_proxy -{ - private: - /// the container to iterate - typename IteratorType::reference container; - - public: - /// construct iteration proxy from a container - explicit iteration_proxy(typename IteratorType::reference cont) noexcept - : container(cont) {} - - /// return iterator begin (needed for range-based for) - iteration_proxy_value begin() noexcept - { - return iteration_proxy_value(container.begin()); - } - - /// return iterator end (needed for range-based for) - iteration_proxy_value end() noexcept - { - return iteration_proxy_value(container.end()); - } -}; -// Structured Bindings Support -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ -// And see https://github.com/nlohmann/json/pull/1391 -template = 0> -auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.key()) -{ - return i.key(); -} -// Structured Bindings Support -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ -// And see https://github.com/nlohmann/json/pull/1391 -template = 0> -auto get(const nlohmann::detail::iteration_proxy_value& i) -> decltype(i.value()) -{ - return i.value(); -} -} // namespace detail -} // namespace nlohmann - -// The Addition to the STD Namespace is required to add -// Structured Bindings Support to the iteration_proxy_value class -// For further reference see https://blog.tartanllama.xyz/structured-bindings/ -// And see https://github.com/nlohmann/json/pull/1391 -namespace std -{ -#if 
defined(__clang__) - // Fix: https://github.com/nlohmann/json/issues/1401 - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wmismatched-tags" -#endif -template -class tuple_size<::nlohmann::detail::iteration_proxy_value> - : public std::integral_constant {}; - -template -class tuple_element> -{ - public: - using type = decltype( - get(std::declval < - ::nlohmann::detail::iteration_proxy_value> ())); -}; -#if defined(__clang__) - #pragma clang diagnostic pop -#endif -} // namespace std - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -////////////////// -// constructors // -////////////////// - -template struct external_constructor; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept - { - j.m_type = value_t::boolean; - j.m_value = b; - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s) - { - j.m_type = value_t::string; - j.m_value = s; - j.assert_invariant(); - } - - template - static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s) - { - j.m_type = value_t::string; - j.m_value = std::move(s); - j.assert_invariant(); - } - - template < typename BasicJsonType, typename CompatibleStringType, - enable_if_t < !std::is_same::value, - int > = 0 > - static void construct(BasicJsonType& j, const CompatibleStringType& str) - { - j.m_type = value_t::string; - j.m_value.string = j.template create(str); - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b) - { - j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{b}; - j.m_value = value; - j.assert_invariant(); - } - - template - static void construct(BasicJsonType& j, typename 
BasicJsonType::binary_t&& b) - { - j.m_type = value_t::binary; - typename BasicJsonType::binary_t value{std::move(b)}; - j.m_value = value; - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept - { - j.m_type = value_t::number_float; - j.m_value = val; - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept - { - j.m_type = value_t::number_unsigned; - j.m_value = val; - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept - { - j.m_type = value_t::number_integer; - j.m_value = val; - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr) - { - j.m_type = value_t::array; - j.m_value = arr; - j.set_parents(); - j.assert_invariant(); - } - - template - static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr) - { - j.m_type = value_t::array; - j.m_value = std::move(arr); - j.set_parents(); - j.assert_invariant(); - } - - template < typename BasicJsonType, typename CompatibleArrayType, - enable_if_t < !std::is_same::value, - int > = 0 > - static void construct(BasicJsonType& j, const CompatibleArrayType& arr) - { - using std::begin; - using std::end; - j.m_type = value_t::array; - j.m_value.array = j.template create(begin(arr), end(arr)); - j.set_parents(); - j.assert_invariant(); - } - - template - static void construct(BasicJsonType& j, const std::vector& arr) - { - j.m_type = value_t::array; - j.m_value = value_t::array; - j.m_value.array->reserve(arr.size()); - for (const bool x : arr) - { - j.m_value.array->push_back(x); - 
j.set_parent(j.m_value.array->back()); - } - j.assert_invariant(); - } - - template::value, int> = 0> - static void construct(BasicJsonType& j, const std::valarray& arr) - { - j.m_type = value_t::array; - j.m_value = value_t::array; - j.m_value.array->resize(arr.size()); - if (arr.size() > 0) - { - std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin()); - } - j.set_parents(); - j.assert_invariant(); - } -}; - -template<> -struct external_constructor -{ - template - static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj) - { - j.m_type = value_t::object; - j.m_value = obj; - j.set_parents(); - j.assert_invariant(); - } - - template - static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj) - { - j.m_type = value_t::object; - j.m_value = std::move(obj); - j.set_parents(); - j.assert_invariant(); - } - - template < typename BasicJsonType, typename CompatibleObjectType, - enable_if_t < !std::is_same::value, int > = 0 > - static void construct(BasicJsonType& j, const CompatibleObjectType& obj) - { - using std::begin; - using std::end; - - j.m_type = value_t::object; - j.m_value.object = j.template create(begin(obj), end(obj)); - j.set_parents(); - j.assert_invariant(); - } -}; - -///////////// -// to_json // -///////////// - -template::value, int> = 0> -void to_json(BasicJsonType& j, T b) noexcept -{ - external_constructor::construct(j, b); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, const CompatibleString& s) -{ - external_constructor::construct(j, s); -} - -template -void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s) -{ - external_constructor::construct(j, std::move(s)); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, FloatType val) noexcept -{ - external_constructor::construct(j, static_cast(val)); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept -{ - external_constructor::construct(j, 
static_cast(val)); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept -{ - external_constructor::construct(j, static_cast(val)); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, EnumType e) noexcept -{ - using underlying_type = typename std::underlying_type::type; - external_constructor::construct(j, static_cast(e)); -} - -template -void to_json(BasicJsonType& j, const std::vector& e) -{ - external_constructor::construct(j, e); -} - -template < typename BasicJsonType, typename CompatibleArrayType, - enable_if_t < is_compatible_array_type::value&& - !is_compatible_object_type::value&& - !is_compatible_string_type::value&& - !std::is_same::value&& - !is_basic_json::value, - int > = 0 > -void to_json(BasicJsonType& j, const CompatibleArrayType& arr) -{ - external_constructor::construct(j, arr); -} - -template -void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin) -{ - external_constructor::construct(j, bin); -} - -template::value, int> = 0> -void to_json(BasicJsonType& j, const std::valarray& arr) -{ - external_constructor::construct(j, std::move(arr)); -} - -template -void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr) -{ - external_constructor::construct(j, std::move(arr)); -} - -template < typename BasicJsonType, typename CompatibleObjectType, - enable_if_t < is_compatible_object_type::value&& !is_basic_json::value, int > = 0 > -void to_json(BasicJsonType& j, const CompatibleObjectType& obj) -{ - external_constructor::construct(j, obj); -} - -template -void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj) -{ - external_constructor::construct(j, std::move(obj)); -} - -template < - typename BasicJsonType, typename T, std::size_t N, - enable_if_t < !std::is_constructible::value, - int > = 0 > -void to_json(BasicJsonType& j, const T(&arr)[N]) -{ - external_constructor::construct(j, arr); -} - -template < typename BasicJsonType, typename T1, 
typename T2, enable_if_t < std::is_constructible::value&& std::is_constructible::value, int > = 0 > -void to_json(BasicJsonType& j, const std::pair& p) -{ - j = { p.first, p.second }; -} - -// for https://github.com/nlohmann/json/pull/1134 -template>::value, int> = 0> -void to_json(BasicJsonType& j, const T& b) -{ - j = { {b.key(), b.value()} }; -} - -template -void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence /*unused*/) -{ - j = { std::get(t)... }; -} - -template::value, int > = 0> -void to_json(BasicJsonType& j, const T& t) -{ - to_json_tuple_impl(j, t, make_index_sequence::value> {}); -} - -struct to_json_fn -{ - template - auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward(val)))) - -> decltype(to_json(j, std::forward(val)), void()) - { - return to_json(j, std::forward(val)); - } -}; -} // namespace detail - -/// namespace to hold default `to_json` function -namespace -{ -constexpr const auto& to_json = detail::static_const::value; -} // namespace -} // namespace nlohmann - - -namespace nlohmann -{ - -template -struct adl_serializer -{ - /*! - @brief convert a JSON value to any value type - - This function is usually called by the `get()` function of the - @ref basic_json class (either explicit or via conversion operators). - - @param[in] j JSON value to read from - @param[in,out] val value to write to - */ - template - static auto from_json(BasicJsonType&& j, ValueType& val) noexcept( - noexcept(::nlohmann::from_json(std::forward(j), val))) - -> decltype(::nlohmann::from_json(std::forward(j), val), void()) - { - ::nlohmann::from_json(std::forward(j), val); - } - - /*! - @brief convert any value type to a JSON value - - This function is usually called by the constructors of the @ref basic_json - class. 
- - @param[in,out] j JSON value to write to - @param[in] val value to read from - */ - template - static auto to_json(BasicJsonType& j, ValueType&& val) noexcept( - noexcept(::nlohmann::to_json(j, std::forward(val)))) - -> decltype(::nlohmann::to_json(j, std::forward(val)), void()) - { - ::nlohmann::to_json(j, std::forward(val)); - } -}; - -} // namespace nlohmann - -// #include - - -#include // uint8_t -#include // tie -#include // move - -namespace nlohmann -{ - -/*! -@brief an internal type for a backed binary type - -This type extends the template parameter @a BinaryType provided to `basic_json` -with a subtype used by BSON and MessagePack. This type exists so that the user -does not have to specify a type themselves with a specific naming scheme in -order to override the binary type. - -@tparam BinaryType container to store bytes (`std::vector` by - default) - -@since version 3.8.0 -*/ -template -class byte_container_with_subtype : public BinaryType -{ - public: - /// the type of the underlying container - using container_type = BinaryType; - - byte_container_with_subtype() noexcept(noexcept(container_type())) - : container_type() - {} - - byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b))) - : container_type(b) - {} - - byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b)))) - : container_type(std::move(b)) - {} - - byte_container_with_subtype(const container_type& b, std::uint8_t subtype_) noexcept(noexcept(container_type(b))) - : container_type(b) - , m_subtype(subtype_) - , m_has_subtype(true) - {} - - byte_container_with_subtype(container_type&& b, std::uint8_t subtype_) noexcept(noexcept(container_type(std::move(b)))) - : container_type(std::move(b)) - , m_subtype(subtype_) - , m_has_subtype(true) - {} - - bool operator==(const byte_container_with_subtype& rhs) const - { - return std::tie(static_cast(*this), m_subtype, m_has_subtype) == - std::tie(static_cast(rhs), 
rhs.m_subtype, rhs.m_has_subtype); - } - - bool operator!=(const byte_container_with_subtype& rhs) const - { - return !(rhs == *this); - } - - /*! - @brief sets the binary subtype - - Sets the binary subtype of the value, also flags a binary JSON value as - having a subtype, which has implications for serialization. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @sa @ref subtype() -- return the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a - subtype - - @since version 3.8.0 - */ - void set_subtype(std::uint8_t subtype_) noexcept - { - m_subtype = subtype_; - m_has_subtype = true; - } - - /*! - @brief return the binary subtype - - Returns the numerical subtype of the value if it has a subtype. If it does - not have a subtype, this function will return size_t(-1) as a sentinel - value. - - @return the numerical subtype of the binary value - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a - subtype - - @since version 3.8.0 - */ - constexpr std::uint8_t subtype() const noexcept - { - return m_subtype; - } - - /*! - @brief return whether the value has a subtype - - @return whether the value has a subtype - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref clear_subtype() -- clears the binary subtype - - @since version 3.8.0 - */ - constexpr bool has_subtype() const noexcept - { - return m_has_subtype; - } - - /*! 
- @brief clears the binary subtype - - Clears the binary subtype and flags the value as not having a subtype, which - has implications for serialization; for instance MessagePack will prefer the - bin family over the ext family. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @sa @ref subtype() -- return the binary subtype - @sa @ref set_subtype() -- sets the binary subtype - @sa @ref has_subtype() -- returns whether or not the binary value has a - subtype - - @since version 3.8.0 - */ - void clear_subtype() noexcept - { - m_subtype = 0; - m_has_subtype = false; - } - - private: - std::uint8_t m_subtype = 0; - bool m_has_subtype = false; -}; - -} // namespace nlohmann - -// #include - -// #include - -// #include - -// #include - - -#include // size_t, uint8_t -#include // hash - -// #include - - -namespace nlohmann -{ -namespace detail -{ - -// boost::hash_combine -inline std::size_t combine(std::size_t seed, std::size_t h) noexcept -{ - seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U); - return seed; -} - -/*! -@brief hash a JSON value - -The hash function tries to rely on std::hash where possible. Furthermore, the -type of the JSON value is taken into account to have different hash values for -null, 0, 0U, and false, etc. 
- -@tparam BasicJsonType basic_json specialization -@param j JSON value to hash -@return hash value of j -*/ -template -std::size_t hash(const BasicJsonType& j) -{ - using string_t = typename BasicJsonType::string_t; - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - - const auto type = static_cast(j.type()); - switch (j.type()) - { - case BasicJsonType::value_t::null: - case BasicJsonType::value_t::discarded: - { - return combine(type, 0); - } - - case BasicJsonType::value_t::object: - { - auto seed = combine(type, j.size()); - for (const auto& element : j.items()) - { - const auto h = std::hash {}(element.key()); - seed = combine(seed, h); - seed = combine(seed, hash(element.value())); - } - return seed; - } - - case BasicJsonType::value_t::array: - { - auto seed = combine(type, j.size()); - for (const auto& element : j) - { - seed = combine(seed, hash(element)); - } - return seed; - } - - case BasicJsonType::value_t::string: - { - const auto h = std::hash {}(j.template get_ref()); - return combine(type, h); - } - - case BasicJsonType::value_t::boolean: - { - const auto h = std::hash {}(j.template get()); - return combine(type, h); - } - - case BasicJsonType::value_t::number_integer: - { - const auto h = std::hash {}(j.template get()); - return combine(type, h); - } - - case BasicJsonType::value_t::number_unsigned: - { - const auto h = std::hash {}(j.template get()); - return combine(type, h); - } - - case BasicJsonType::value_t::number_float: - { - const auto h = std::hash {}(j.template get()); - return combine(type, h); - } - - case BasicJsonType::value_t::binary: - { - auto seed = combine(type, j.get_binary().size()); - const auto h = std::hash {}(j.get_binary().has_subtype()); - seed = combine(seed, h); - seed = combine(seed, j.get_binary().subtype()); - for (const auto byte : j.get_binary()) - { - seed = 
combine(seed, std::hash {}(byte)); - } - return seed; - } - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - return 0; // LCOV_EXCL_LINE - } -} - -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // generate_n -#include // array -#include // ldexp -#include // size_t -#include // uint8_t, uint16_t, uint32_t, uint64_t -#include // snprintf -#include // memcpy -#include // back_inserter -#include // numeric_limits -#include // char_traits, string -#include // make_pair, move -#include // vector - -// #include - -// #include - - -#include // array -#include // size_t -#include //FILE * -#include // strlen -#include // istream -#include // begin, end, iterator_traits, random_access_iterator_tag, distance, next -#include // shared_ptr, make_shared, addressof -#include // accumulate -#include // string, char_traits -#include // enable_if, is_base_of, is_pointer, is_integral, remove_pointer -#include // pair, declval - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/// the supported input formats -enum class input_format_t { json, cbor, msgpack, ubjson, bson }; - -//////////////////// -// input adapters // -//////////////////// - -/*! -Input adapter for stdio file access. This adapter read only 1 byte and do not use any - buffer. This adapter is a very low level adapter. -*/ -class file_input_adapter -{ - public: - using char_type = char; - - JSON_HEDLEY_NON_NULL(2) - explicit file_input_adapter(std::FILE* f) noexcept - : m_file(f) - {} - - // make class move-only - file_input_adapter(const file_input_adapter&) = delete; - file_input_adapter(file_input_adapter&&) = default; - file_input_adapter& operator=(const file_input_adapter&) = delete; - file_input_adapter& operator=(file_input_adapter&&) = delete; - - std::char_traits::int_type get_character() noexcept - { - return std::fgetc(m_file); - } - - private: - /// the file pointer to read from - std::FILE* m_file; -}; - - -/*! 
-Input adapter for a (caching) istream. Ignores a UFT Byte Order Mark at -beginning of input. Does not support changing the underlying std::streambuf -in mid-input. Maintains underlying std::istream and std::streambuf to support -subsequent use of standard std::istream operations to process any input -characters following those used in parsing the JSON input. Clears the -std::istream flags; any input errors (e.g., EOF) will be detected by the first -subsequent call for input from the std::istream. -*/ -class input_stream_adapter -{ - public: - using char_type = char; - - ~input_stream_adapter() - { - // clear stream flags; we use underlying streambuf I/O, do not - // maintain ifstream flags, except eof - if (is != nullptr) - { - is->clear(is->rdstate() & std::ios::eofbit); - } - } - - explicit input_stream_adapter(std::istream& i) - : is(&i), sb(i.rdbuf()) - {} - - // delete because of pointer members - input_stream_adapter(const input_stream_adapter&) = delete; - input_stream_adapter& operator=(input_stream_adapter&) = delete; - input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete; - - input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb) - { - rhs.is = nullptr; - rhs.sb = nullptr; - } - - // std::istream/std::streambuf use std::char_traits::to_int_type, to - // ensure that std::char_traits::eof() and the character 0xFF do not - // end up as the same value, eg. 0xFFFFFFFF. - std::char_traits::int_type get_character() - { - auto res = sb->sbumpc(); - // set eof manually, as we don't use the istream interface. - if (JSON_HEDLEY_UNLIKELY(res == EOF)) - { - is->clear(is->rdstate() | std::ios::eofbit); - } - return res; - } - - private: - /// the associated input stream - std::istream* is = nullptr; - std::streambuf* sb = nullptr; -}; - -// General-purpose iterator-based adapter. It might not be as fast as -// theoretically possible for some containers, but it is extremely versatile. 
-template -class iterator_input_adapter -{ - public: - using char_type = typename std::iterator_traits::value_type; - - iterator_input_adapter(IteratorType first, IteratorType last) - : current(std::move(first)), end(std::move(last)) {} - - typename std::char_traits::int_type get_character() - { - if (JSON_HEDLEY_LIKELY(current != end)) - { - auto result = std::char_traits::to_int_type(*current); - std::advance(current, 1); - return result; - } - - return std::char_traits::eof(); - } - - private: - IteratorType current; - IteratorType end; - - template - friend struct wide_string_input_helper; - - bool empty() const - { - return current == end; - } - -}; - - -template -struct wide_string_input_helper; - -template -struct wide_string_input_helper -{ - // UTF-32 - static void fill_buffer(BaseInputAdapter& input, - std::array::int_type, 4>& utf8_bytes, - size_t& utf8_bytes_index, - size_t& utf8_bytes_filled) - { - utf8_bytes_index = 0; - - if (JSON_HEDLEY_UNLIKELY(input.empty())) - { - utf8_bytes[0] = std::char_traits::eof(); - utf8_bytes_filled = 1; - } - else - { - // get the current character - const auto wc = input.get_character(); - - // UTF-32 to UTF-8 encoding - if (wc < 0x80) - { - utf8_bytes[0] = static_cast::int_type>(wc); - utf8_bytes_filled = 1; - } - else if (wc <= 0x7FF) - { - utf8_bytes[0] = static_cast::int_type>(0xC0u | ((static_cast(wc) >> 6u) & 0x1Fu)); - utf8_bytes[1] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); - utf8_bytes_filled = 2; - } - else if (wc <= 0xFFFF) - { - utf8_bytes[0] = static_cast::int_type>(0xE0u | ((static_cast(wc) >> 12u) & 0x0Fu)); - utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); - utf8_bytes[2] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); - utf8_bytes_filled = 3; - } - else if (wc <= 0x10FFFF) - { - utf8_bytes[0] = static_cast::int_type>(0xF0u | ((static_cast(wc) >> 18u) & 0x07u)); - utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 12u) & 
0x3Fu)); - utf8_bytes[2] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); - utf8_bytes[3] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); - utf8_bytes_filled = 4; - } - else - { - // unknown character - utf8_bytes[0] = static_cast::int_type>(wc); - utf8_bytes_filled = 1; - } - } - } -}; - -template -struct wide_string_input_helper -{ - // UTF-16 - static void fill_buffer(BaseInputAdapter& input, - std::array::int_type, 4>& utf8_bytes, - size_t& utf8_bytes_index, - size_t& utf8_bytes_filled) - { - utf8_bytes_index = 0; - - if (JSON_HEDLEY_UNLIKELY(input.empty())) - { - utf8_bytes[0] = std::char_traits::eof(); - utf8_bytes_filled = 1; - } - else - { - // get the current character - const auto wc = input.get_character(); - - // UTF-16 to UTF-8 encoding - if (wc < 0x80) - { - utf8_bytes[0] = static_cast::int_type>(wc); - utf8_bytes_filled = 1; - } - else if (wc <= 0x7FF) - { - utf8_bytes[0] = static_cast::int_type>(0xC0u | ((static_cast(wc) >> 6u))); - utf8_bytes[1] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); - utf8_bytes_filled = 2; - } - else if (0xD800 > wc || wc >= 0xE000) - { - utf8_bytes[0] = static_cast::int_type>(0xE0u | ((static_cast(wc) >> 12u))); - utf8_bytes[1] = static_cast::int_type>(0x80u | ((static_cast(wc) >> 6u) & 0x3Fu)); - utf8_bytes[2] = static_cast::int_type>(0x80u | (static_cast(wc) & 0x3Fu)); - utf8_bytes_filled = 3; - } - else - { - if (JSON_HEDLEY_UNLIKELY(!input.empty())) - { - const auto wc2 = static_cast(input.get_character()); - const auto charcode = 0x10000u + (((static_cast(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu)); - utf8_bytes[0] = static_cast::int_type>(0xF0u | (charcode >> 18u)); - utf8_bytes[1] = static_cast::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu)); - utf8_bytes[2] = static_cast::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu)); - utf8_bytes[3] = static_cast::int_type>(0x80u | (charcode & 0x3Fu)); - utf8_bytes_filled = 4; - } - else - { - utf8_bytes[0] = static_cast::int_type>(wc); 
- utf8_bytes_filled = 1; - } - } - } - } -}; - -// Wraps another input apdater to convert wide character types into individual bytes. -template -class wide_string_input_adapter -{ - public: - using char_type = char; - - wide_string_input_adapter(BaseInputAdapter base) - : base_adapter(base) {} - - typename std::char_traits::int_type get_character() noexcept - { - // check if buffer needs to be filled - if (utf8_bytes_index == utf8_bytes_filled) - { - fill_buffer(); - - JSON_ASSERT(utf8_bytes_filled > 0); - JSON_ASSERT(utf8_bytes_index == 0); - } - - // use buffer - JSON_ASSERT(utf8_bytes_filled > 0); - JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled); - return utf8_bytes[utf8_bytes_index++]; - } - - private: - BaseInputAdapter base_adapter; - - template - void fill_buffer() - { - wide_string_input_helper::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled); - } - - /// a buffer for UTF-8 bytes - std::array::int_type, 4> utf8_bytes = {{0, 0, 0, 0}}; - - /// index to the utf8_codes array for the next valid byte - std::size_t utf8_bytes_index = 0; - /// number of valid bytes in the utf8_codes array - std::size_t utf8_bytes_filled = 0; -}; - - -template -struct iterator_input_adapter_factory -{ - using iterator_type = IteratorType; - using char_type = typename std::iterator_traits::value_type; - using adapter_type = iterator_input_adapter; - - static adapter_type create(IteratorType first, IteratorType last) - { - return adapter_type(std::move(first), std::move(last)); - } -}; - -template -struct is_iterator_of_multibyte -{ - using value_type = typename std::iterator_traits::value_type; - enum - { - value = sizeof(value_type) > 1 - }; -}; - -template -struct iterator_input_adapter_factory::value>> -{ - using iterator_type = IteratorType; - using char_type = typename std::iterator_traits::value_type; - using base_adapter_type = iterator_input_adapter; - using adapter_type = wide_string_input_adapter; - - static adapter_type create(IteratorType 
first, IteratorType last) - { - return adapter_type(base_adapter_type(std::move(first), std::move(last))); - } -}; - -// General purpose iterator-based input -template -typename iterator_input_adapter_factory::adapter_type input_adapter(IteratorType first, IteratorType last) -{ - using factory_type = iterator_input_adapter_factory; - return factory_type::create(first, last); -} - -// Convenience shorthand from container to iterator -// Enables ADL on begin(container) and end(container) -// Encloses the using declarations in namespace for not to leak them to outside scope - -namespace container_input_adapter_factory_impl -{ - -using std::begin; -using std::end; - -template -struct container_input_adapter_factory {}; - -template -struct container_input_adapter_factory< ContainerType, - void_t()), end(std::declval()))>> - { - using adapter_type = decltype(input_adapter(begin(std::declval()), end(std::declval()))); - - static adapter_type create(const ContainerType& container) -{ - return input_adapter(begin(container), end(container)); -} - }; - -} - -template -typename container_input_adapter_factory_impl::container_input_adapter_factory::adapter_type input_adapter(const ContainerType& container) -{ - return container_input_adapter_factory_impl::container_input_adapter_factory::create(container); -} - -// Special cases with fast paths -inline file_input_adapter input_adapter(std::FILE* file) -{ - return file_input_adapter(file); -} - -inline input_stream_adapter input_adapter(std::istream& stream) -{ - return input_stream_adapter(stream); -} - -inline input_stream_adapter input_adapter(std::istream&& stream) -{ - return input_stream_adapter(stream); -} - -using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval(), std::declval())); - -// Null-delimited strings, and the like. 
-template < typename CharT, - typename std::enable_if < - std::is_pointer::value&& - !std::is_array::value&& - std::is_integral::type>::value&& - sizeof(typename std::remove_pointer::type) == 1, - int >::type = 0 > -contiguous_bytes_input_adapter input_adapter(CharT b) -{ - auto length = std::strlen(reinterpret_cast(b)); - const auto* ptr = reinterpret_cast(b); - return input_adapter(ptr, ptr + length); -} - -template -auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N)) -{ - return input_adapter(array, array + N); -} - -// This class only handles inputs of input_buffer_adapter type. -// It's required so that expressions like {ptr, len} can be implicitely casted -// to the correct adapter. -class span_input_adapter -{ - public: - template < typename CharT, - typename std::enable_if < - std::is_pointer::value&& - std::is_integral::type>::value&& - sizeof(typename std::remove_pointer::type) == 1, - int >::type = 0 > - span_input_adapter(CharT b, std::size_t l) - : ia(reinterpret_cast(b), reinterpret_cast(b) + l) {} - - template::iterator_category, std::random_access_iterator_tag>::value, - int>::type = 0> - span_input_adapter(IteratorType first, IteratorType last) - : ia(input_adapter(first, last)) {} - - contiguous_bytes_input_adapter&& get() - { - return std::move(ia); - } - - private: - contiguous_bytes_input_adapter ia; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include -#include // string -#include // move -#include // vector - -// #include - -// #include - - -namespace nlohmann -{ - -/*! -@brief SAX interface - -This class describes the SAX interface used by @ref nlohmann::json::sax_parse. -Each function is called in different situations while the input is parsed. The -boolean return value informs the parser whether to continue processing the -input. 
-*/ -template -struct json_sax -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @brief a null value was read - @return whether parsing should proceed - */ - virtual bool null() = 0; - - /*! - @brief a boolean value was read - @param[in] val boolean value - @return whether parsing should proceed - */ - virtual bool boolean(bool val) = 0; - - /*! - @brief an integer number was read - @param[in] val integer value - @return whether parsing should proceed - */ - virtual bool number_integer(number_integer_t val) = 0; - - /*! - @brief an unsigned integer number was read - @param[in] val unsigned integer value - @return whether parsing should proceed - */ - virtual bool number_unsigned(number_unsigned_t val) = 0; - - /*! - @brief an floating-point number was read - @param[in] val floating-point value - @param[in] s raw token value - @return whether parsing should proceed - */ - virtual bool number_float(number_float_t val, const string_t& s) = 0; - - /*! - @brief a string was read - @param[in] val string value - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool string(string_t& val) = 0; - - /*! - @brief a binary string was read - @param[in] val binary value - @return whether parsing should proceed - @note It is safe to move the passed binary. - */ - virtual bool binary(binary_t& val) = 0; - - /*! - @brief the beginning of an object was read - @param[in] elements number of object elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_object(std::size_t elements) = 0; - - /*! 
- @brief an object key was read - @param[in] val object key - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool key(string_t& val) = 0; - - /*! - @brief the end of an object was read - @return whether parsing should proceed - */ - virtual bool end_object() = 0; - - /*! - @brief the beginning of an array was read - @param[in] elements number of array elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_array(std::size_t elements) = 0; - - /*! - @brief the end of an array was read - @return whether parsing should proceed - */ - virtual bool end_array() = 0; - - /*! - @brief a parse error occurred - @param[in] position the position in the input where the error occurs - @param[in] last_token the last read token - @param[in] ex an exception object describing the error - @return whether parsing should proceed (must return false) - */ - virtual bool parse_error(std::size_t position, - const std::string& last_token, - const detail::exception& ex) = 0; - - virtual ~json_sax() = default; -}; - - -namespace detail -{ -/*! -@brief SAX implementation to create a JSON value from SAX events - -This class implements the @ref json_sax interface and processes the SAX events -to create a JSON value which makes it basically a DOM parser. The structure or -hierarchy of the JSON value is managed by the stack `ref_stack` which contains -a pointer to the respective array or object for each recursion depth. - -After successful parsing, the value that is passed by reference to the -constructor contains the parsed value. 
- -@tparam BasicJsonType the JSON type -*/ -template -class json_sax_dom_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @param[in, out] r reference to a JSON value that is manipulated while - parsing - @param[in] allow_exceptions_ whether parse errors yield exceptions - */ - explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) - : root(r), allow_exceptions(allow_exceptions_) - {} - - // make class move-only - json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; - json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; - ~json_sax_dom_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); - - if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len), *ref_stack.back())); - } - - return true; - } - - bool key(string_t& val) - { - // 
add null at given key and store the reference for later - object_element = &(ref_stack.back()->m_value.object->operator[](val)); - return true; - } - - bool end_object() - { - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - bool start_array(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); - - if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len), *ref_stack.back())); - } - - return true; - } - - bool end_array() - { - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - */ - template - JSON_HEDLEY_RETURNS_NON_NULL - BasicJsonType* handle_value(Value&& v) - { - if (ref_stack.empty()) - { - root = BasicJsonType(std::forward(v)); - return &root; - } - - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->emplace_back(std::forward(v)); - return &(ref_stack.back()->m_value.array->back()); - } - - JSON_ASSERT(ref_stack.back()->is_object()); - JSON_ASSERT(object_element); - *object_element = BasicJsonType(std::forward(v)); - return object_element; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; - -template -class json_sax_dom_callback_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using parser_callback_t = typename BasicJsonType::parser_callback_t; - using parse_event_t = typename BasicJsonType::parse_event_t; - - json_sax_dom_callback_parser(BasicJsonType& r, - const parser_callback_t cb, - const bool allow_exceptions_ = true) - : root(r), callback(cb), allow_exceptions(allow_exceptions_) - { - keep_stack.push_back(true); - } - - // make class move-only - json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; - 
json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; - ~json_sax_dom_callback_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - // check callback for object start - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::object, true); - ref_stack.push_back(val.second); - - // check object limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len), *ref_stack.back())); - } - - return true; - } - - bool key(string_t& val) - { - BasicJsonType k = BasicJsonType(val); - - // check callback for key - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); - key_keep_stack.push_back(keep); - - // add discarded value at given key and store the reference for later - if (keep && ref_stack.back()) - { - object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded); - } - - return true; - } - - bool end_object() - { - if (ref_stack.back()) - { - if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) - { - // 
discard object - *ref_stack.back() = discarded; - } - else - { - ref_stack.back()->set_parents(); - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) - { - // remove discarded value - for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) - { - if (it->is_discarded()) - { - ref_stack.back()->erase(it); - break; - } - } - } - - return true; - } - - bool start_array(std::size_t len) - { - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::array, true); - ref_stack.push_back(val.second); - - // check array limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len), *ref_stack.back())); - } - - return true; - } - - bool end_array() - { - bool keep = true; - - if (ref_stack.back()) - { - keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); - if (keep) - { - ref_stack.back()->set_parents(); - } - else - { - // discard array - *ref_stack.back() = discarded; - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - // remove discarded value - if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->pop_back(); - } - - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! 
- @param[in] v value to add to the JSON value we build during parsing - @param[in] skip_callback whether we should skip calling the callback - function; this is required after start_array() and - start_object() SAX events, because otherwise we would call the - callback function with an empty array or object, respectively. - - @invariant If the ref stack is empty, then the passed value will be the new - root. - @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - - @return pair of boolean (whether value should be kept) and pointer (to the - passed value in the ref_stack hierarchy; nullptr if not kept) - */ - template - std::pair handle_value(Value&& v, const bool skip_callback = false) - { - JSON_ASSERT(!keep_stack.empty()); - - // do not handle this value if we know it would be added to a discarded - // container - if (!keep_stack.back()) - { - return {false, nullptr}; - } - - // create value - auto value = BasicJsonType(std::forward(v)); - - // check callback - const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); - - // do not handle this value if we just learnt it shall be discarded - if (!keep) - { - return {false, nullptr}; - } - - if (ref_stack.empty()) - { - root = std::move(value); - return {true, &root}; - } - - // skip this value if we already decided to skip the parent - // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) - if (!ref_stack.back()) - { - return {false, nullptr}; - } - - // we now only expect arrays and objects - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - // array - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_value.array->emplace_back(std::move(value)); - return {true, &(ref_stack.back()->m_value.array->back())}; - } - - // object - JSON_ASSERT(ref_stack.back()->is_object()); - // check if we should store an element for the current key - 
JSON_ASSERT(!key_keep_stack.empty()); - const bool store_element = key_keep_stack.back(); - key_keep_stack.pop_back(); - - if (!store_element) - { - return {false, nullptr}; - } - - JSON_ASSERT(object_element); - *object_element = std::move(value); - return {true, object_element}; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// stack to manage which values to keep - std::vector keep_stack {}; - /// stack to manage which object keys to keep - std::vector key_keep_stack {}; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// callback function - const parser_callback_t callback = nullptr; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; - /// a discarded value for the callback - BasicJsonType discarded = BasicJsonType::value_t::discarded; -}; - -template -class json_sax_acceptor -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - bool null() - { - return true; - } - - bool boolean(bool /*unused*/) - { - return true; - } - - bool number_integer(number_integer_t /*unused*/) - { - return true; - } - - bool number_unsigned(number_unsigned_t /*unused*/) - { - return true; - } - - bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) - { - return true; - } - - bool string(string_t& /*unused*/) - { - return true; - } - - bool binary(binary_t& /*unused*/) - { - return true; - } - - bool start_object(std::size_t /*unused*/ = std::size_t(-1)) - { - return true; - } - - bool key(string_t& /*unused*/) - { - return true; - } - - bool end_object() - 
{ - return true; - } - - bool start_array(std::size_t /*unused*/ = std::size_t(-1)) - { - return true; - } - - bool end_array() - { - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) - { - return false; - } -}; -} // namespace detail - -} // namespace nlohmann - -// #include - - -#include // array -#include // localeconv -#include // size_t -#include // snprintf -#include // strtof, strtod, strtold, strtoll, strtoull -#include // initializer_list -#include // char_traits, string -#include // move -#include // vector - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/////////// -// lexer // -/////////// - -template -class lexer_base -{ - public: - /// token types for the parser - enum class token_type - { - uninitialized, ///< indicating the scanner is uninitialized - literal_true, ///< the `true` literal - literal_false, ///< the `false` literal - literal_null, ///< the `null` literal - value_string, ///< a string -- use get_string() for actual value - value_unsigned, ///< an unsigned integer -- use get_number_unsigned() for actual value - value_integer, ///< a signed integer -- use get_number_integer() for actual value - value_float, ///< an floating point number -- use get_number_float() for actual value - begin_array, ///< the character for array begin `[` - begin_object, ///< the character for object begin `{` - end_array, ///< the character for array end `]` - end_object, ///< the character for object end `}` - name_separator, ///< the name separator `:` - value_separator, ///< the value separator `,` - parse_error, ///< indicating a parse error - end_of_input, ///< indicating the end of the input buffer - literal_or_value ///< a literal or the begin of a value (only for diagnostics) - }; - - /// return name of values of type token_type (only used for errors) - JSON_HEDLEY_RETURNS_NON_NULL - JSON_HEDLEY_CONST - static const char* 
token_type_name(const token_type t) noexcept - { - switch (t) - { - case token_type::uninitialized: - return ""; - case token_type::literal_true: - return "true literal"; - case token_type::literal_false: - return "false literal"; - case token_type::literal_null: - return "null literal"; - case token_type::value_string: - return "string literal"; - case token_type::value_unsigned: - case token_type::value_integer: - case token_type::value_float: - return "number literal"; - case token_type::begin_array: - return "'['"; - case token_type::begin_object: - return "'{'"; - case token_type::end_array: - return "']'"; - case token_type::end_object: - return "'}'"; - case token_type::name_separator: - return "':'"; - case token_type::value_separator: - return "','"; - case token_type::parse_error: - return ""; - case token_type::end_of_input: - return "end of input"; - case token_type::literal_or_value: - return "'[', '{', or a literal"; - // LCOV_EXCL_START - default: // catch non-enum values - return "unknown token"; - // LCOV_EXCL_STOP - } - } -}; -/*! -@brief lexical analysis - -This class organizes the lexical analysis during JSON deserialization. 
-*/ -template -class lexer : public lexer_base -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using char_type = typename InputAdapterType::char_type; - using char_int_type = typename std::char_traits::int_type; - - public: - using token_type = typename lexer_base::token_type; - - explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false) - : ia(std::move(adapter)) - , ignore_comments(ignore_comments_) - , decimal_point_char(static_cast(get_decimal_point())) - {} - - // delete because of pointer members - lexer(const lexer&) = delete; - lexer(lexer&&) = default; - lexer& operator=(lexer&) = delete; - lexer& operator=(lexer&&) = default; - ~lexer() = default; - - private: - ///////////////////// - // locales - ///////////////////// - - /// return the locale-dependent decimal point - JSON_HEDLEY_PURE - static char get_decimal_point() noexcept - { - const auto* loc = localeconv(); - JSON_ASSERT(loc != nullptr); - return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point); - } - - ///////////////////// - // scan functions - ///////////////////// - - /*! - @brief get codepoint from 4 hex characters following `\u` - - For input "\u c1 c2 c3 c4" the codepoint is: - (c1 * 0x1000) + (c2 * 0x0100) + (c3 * 0x0010) + c4 - = (c1 << 12) + (c2 << 8) + (c3 << 4) + (c4 << 0) - - Furthermore, the possible characters '0'..'9', 'A'..'F', and 'a'..'f' - must be converted to the integers 0x0..0x9, 0xA..0xF, 0xA..0xF, resp. The - conversion is done by subtracting the offset (0x30, 0x37, and 0x57) - between the ASCII value of the character and the desired integer value. - - @return codepoint (0x0000..0xFFFF) or -1 in case of an error (e.g. 
EOF or - non-hex character) - */ - int get_codepoint() - { - // this function only makes sense after reading `\u` - JSON_ASSERT(current == 'u'); - int codepoint = 0; - - const auto factors = { 12u, 8u, 4u, 0u }; - for (const auto factor : factors) - { - get(); - - if (current >= '0' && current <= '9') - { - codepoint += static_cast((static_cast(current) - 0x30u) << factor); - } - else if (current >= 'A' && current <= 'F') - { - codepoint += static_cast((static_cast(current) - 0x37u) << factor); - } - else if (current >= 'a' && current <= 'f') - { - codepoint += static_cast((static_cast(current) - 0x57u) << factor); - } - else - { - return -1; - } - } - - JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF); - return codepoint; - } - - /*! - @brief check if the next byte(s) are inside a given range - - Adds the current byte and, for each passed range, reads a new byte and - checks if it is inside the range. If a violation was detected, set up an - error message and return false. Otherwise, return true. - - @param[in] ranges list of integers; interpreted as list of pairs of - inclusive lower and upper bound, respectively - - @pre The passed list @a ranges must have 2, 4, or 6 elements; that is, - 1, 2, or 3 pairs. This precondition is enforced by an assertion. - - @return true if and only if no range violation was detected - */ - bool next_byte_in_range(std::initializer_list ranges) - { - JSON_ASSERT(ranges.size() == 2 || ranges.size() == 4 || ranges.size() == 6); - add(current); - - for (auto range = ranges.begin(); range != ranges.end(); ++range) - { - get(); - if (JSON_HEDLEY_LIKELY(*range <= current && current <= *(++range))) - { - add(current); - } - else - { - error_message = "invalid string: ill-formed UTF-8 byte"; - return false; - } - } - - return true; - } - - /*! - @brief scan a string literal - - This function scans a string according to Sect. 7 of RFC 7159. While - scanning, bytes are escaped and copied into buffer token_buffer. 
Then the - function returns successfully, token_buffer is *not* null-terminated (as it - may contain \0 bytes), and token_buffer.size() is the number of bytes in the - string. - - @return token_type::value_string if string could be successfully scanned, - token_type::parse_error otherwise - - @note In case of errors, variable error_message contains a textual - description. - */ - token_type scan_string() - { - // reset token_buffer (ignore opening quote) - reset(); - - // we entered the function by reading an open quote - JSON_ASSERT(current == '\"'); - - while (true) - { - // get next character - switch (get()) - { - // end of file while parsing string - case std::char_traits::eof(): - { - error_message = "invalid string: missing closing quote"; - return token_type::parse_error; - } - - // closing quote - case '\"': - { - return token_type::value_string; - } - - // escapes - case '\\': - { - switch (get()) - { - // quotation mark - case '\"': - add('\"'); - break; - // reverse solidus - case '\\': - add('\\'); - break; - // solidus - case '/': - add('/'); - break; - // backspace - case 'b': - add('\b'); - break; - // form feed - case 'f': - add('\f'); - break; - // line feed - case 'n': - add('\n'); - break; - // carriage return - case 'r': - add('\r'); - break; - // tab - case 't': - add('\t'); - break; - - // unicode escapes - case 'u': - { - const int codepoint1 = get_codepoint(); - int codepoint = codepoint1; // start with codepoint1 - - if (JSON_HEDLEY_UNLIKELY(codepoint1 == -1)) - { - error_message = "invalid string: '\\u' must be followed by 4 hex digits"; - return token_type::parse_error; - } - - // check if code point is a high surrogate - if (0xD800 <= codepoint1 && codepoint1 <= 0xDBFF) - { - // expect next \uxxxx entry - if (JSON_HEDLEY_LIKELY(get() == '\\' && get() == 'u')) - { - const int codepoint2 = get_codepoint(); - - if (JSON_HEDLEY_UNLIKELY(codepoint2 == -1)) - { - error_message = "invalid string: '\\u' must be followed by 4 hex digits"; - 
return token_type::parse_error; - } - - // check if codepoint2 is a low surrogate - if (JSON_HEDLEY_LIKELY(0xDC00 <= codepoint2 && codepoint2 <= 0xDFFF)) - { - // overwrite codepoint - codepoint = static_cast( - // high surrogate occupies the most significant 22 bits - (static_cast(codepoint1) << 10u) - // low surrogate occupies the least significant 15 bits - + static_cast(codepoint2) - // there is still the 0xD800, 0xDC00 and 0x10000 noise - // in the result so we have to subtract with: - // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00 - - 0x35FDC00u); - } - else - { - error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF"; - return token_type::parse_error; - } - } - else - { - error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF"; - return token_type::parse_error; - } - } - else - { - if (JSON_HEDLEY_UNLIKELY(0xDC00 <= codepoint1 && codepoint1 <= 0xDFFF)) - { - error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF"; - return token_type::parse_error; - } - } - - // result of the above calculation yields a proper codepoint - JSON_ASSERT(0x00 <= codepoint && codepoint <= 0x10FFFF); - - // translate codepoint into bytes - if (codepoint < 0x80) - { - // 1-byte characters: 0xxxxxxx (ASCII) - add(static_cast(codepoint)); - } - else if (codepoint <= 0x7FF) - { - // 2-byte characters: 110xxxxx 10xxxxxx - add(static_cast(0xC0u | (static_cast(codepoint) >> 6u))); - add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); - } - else if (codepoint <= 0xFFFF) - { - // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx - add(static_cast(0xE0u | (static_cast(codepoint) >> 12u))); - add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu))); - add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); - } - else - { - // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - add(static_cast(0xF0u | (static_cast(codepoint) >> 18u))); - add(static_cast(0x80u | 
((static_cast(codepoint) >> 12u) & 0x3Fu))); - add(static_cast(0x80u | ((static_cast(codepoint) >> 6u) & 0x3Fu))); - add(static_cast(0x80u | (static_cast(codepoint) & 0x3Fu))); - } - - break; - } - - // other characters after escape - default: - error_message = "invalid string: forbidden character after backslash"; - return token_type::parse_error; - } - - break; - } - - // invalid control characters - case 0x00: - { - error_message = "invalid string: control character U+0000 (NUL) must be escaped to \\u0000"; - return token_type::parse_error; - } - - case 0x01: - { - error_message = "invalid string: control character U+0001 (SOH) must be escaped to \\u0001"; - return token_type::parse_error; - } - - case 0x02: - { - error_message = "invalid string: control character U+0002 (STX) must be escaped to \\u0002"; - return token_type::parse_error; - } - - case 0x03: - { - error_message = "invalid string: control character U+0003 (ETX) must be escaped to \\u0003"; - return token_type::parse_error; - } - - case 0x04: - { - error_message = "invalid string: control character U+0004 (EOT) must be escaped to \\u0004"; - return token_type::parse_error; - } - - case 0x05: - { - error_message = "invalid string: control character U+0005 (ENQ) must be escaped to \\u0005"; - return token_type::parse_error; - } - - case 0x06: - { - error_message = "invalid string: control character U+0006 (ACK) must be escaped to \\u0006"; - return token_type::parse_error; - } - - case 0x07: - { - error_message = "invalid string: control character U+0007 (BEL) must be escaped to \\u0007"; - return token_type::parse_error; - } - - case 0x08: - { - error_message = "invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b"; - return token_type::parse_error; - } - - case 0x09: - { - error_message = "invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t"; - return token_type::parse_error; - } - - case 0x0A: - { - error_message = "invalid string: control 
character U+000A (LF) must be escaped to \\u000A or \\n"; - return token_type::parse_error; - } - - case 0x0B: - { - error_message = "invalid string: control character U+000B (VT) must be escaped to \\u000B"; - return token_type::parse_error; - } - - case 0x0C: - { - error_message = "invalid string: control character U+000C (FF) must be escaped to \\u000C or \\f"; - return token_type::parse_error; - } - - case 0x0D: - { - error_message = "invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r"; - return token_type::parse_error; - } - - case 0x0E: - { - error_message = "invalid string: control character U+000E (SO) must be escaped to \\u000E"; - return token_type::parse_error; - } - - case 0x0F: - { - error_message = "invalid string: control character U+000F (SI) must be escaped to \\u000F"; - return token_type::parse_error; - } - - case 0x10: - { - error_message = "invalid string: control character U+0010 (DLE) must be escaped to \\u0010"; - return token_type::parse_error; - } - - case 0x11: - { - error_message = "invalid string: control character U+0011 (DC1) must be escaped to \\u0011"; - return token_type::parse_error; - } - - case 0x12: - { - error_message = "invalid string: control character U+0012 (DC2) must be escaped to \\u0012"; - return token_type::parse_error; - } - - case 0x13: - { - error_message = "invalid string: control character U+0013 (DC3) must be escaped to \\u0013"; - return token_type::parse_error; - } - - case 0x14: - { - error_message = "invalid string: control character U+0014 (DC4) must be escaped to \\u0014"; - return token_type::parse_error; - } - - case 0x15: - { - error_message = "invalid string: control character U+0015 (NAK) must be escaped to \\u0015"; - return token_type::parse_error; - } - - case 0x16: - { - error_message = "invalid string: control character U+0016 (SYN) must be escaped to \\u0016"; - return token_type::parse_error; - } - - case 0x17: - { - error_message = "invalid string: control 
character U+0017 (ETB) must be escaped to \\u0017"; - return token_type::parse_error; - } - - case 0x18: - { - error_message = "invalid string: control character U+0018 (CAN) must be escaped to \\u0018"; - return token_type::parse_error; - } - - case 0x19: - { - error_message = "invalid string: control character U+0019 (EM) must be escaped to \\u0019"; - return token_type::parse_error; - } - - case 0x1A: - { - error_message = "invalid string: control character U+001A (SUB) must be escaped to \\u001A"; - return token_type::parse_error; - } - - case 0x1B: - { - error_message = "invalid string: control character U+001B (ESC) must be escaped to \\u001B"; - return token_type::parse_error; - } - - case 0x1C: - { - error_message = "invalid string: control character U+001C (FS) must be escaped to \\u001C"; - return token_type::parse_error; - } - - case 0x1D: - { - error_message = "invalid string: control character U+001D (GS) must be escaped to \\u001D"; - return token_type::parse_error; - } - - case 0x1E: - { - error_message = "invalid string: control character U+001E (RS) must be escaped to \\u001E"; - return token_type::parse_error; - } - - case 0x1F: - { - error_message = "invalid string: control character U+001F (US) must be escaped to \\u001F"; - return token_type::parse_error; - } - - // U+0020..U+007F (except U+0022 (quote) and U+005C (backspace)) - case 0x20: - case 0x21: - case 0x23: - case 0x24: - case 0x25: - case 0x26: - case 0x27: - case 0x28: - case 0x29: - case 0x2A: - case 0x2B: - case 0x2C: - case 0x2D: - case 0x2E: - case 0x2F: - case 0x30: - case 0x31: - case 0x32: - case 0x33: - case 0x34: - case 0x35: - case 0x36: - case 0x37: - case 0x38: - case 0x39: - case 0x3A: - case 0x3B: - case 0x3C: - case 0x3D: - case 0x3E: - case 0x3F: - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4A: - case 0x4B: - case 0x4C: - case 0x4D: - case 0x4E: - case 0x4F: - case 0x50: - 
case 0x51: - case 0x52: - case 0x53: - case 0x54: - case 0x55: - case 0x56: - case 0x57: - case 0x58: - case 0x59: - case 0x5A: - case 0x5B: - case 0x5D: - case 0x5E: - case 0x5F: - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - case 0x78: - case 0x79: - case 0x7A: - case 0x7B: - case 0x7C: - case 0x7D: - case 0x7E: - case 0x7F: - { - add(current); - break; - } - - // U+0080..U+07FF: bytes C2..DF 80..BF - case 0xC2: - case 0xC3: - case 0xC4: - case 0xC5: - case 0xC6: - case 0xC7: - case 0xC8: - case 0xC9: - case 0xCA: - case 0xCB: - case 0xCC: - case 0xCD: - case 0xCE: - case 0xCF: - case 0xD0: - case 0xD1: - case 0xD2: - case 0xD3: - case 0xD4: - case 0xD5: - case 0xD6: - case 0xD7: - case 0xD8: - case 0xD9: - case 0xDA: - case 0xDB: - case 0xDC: - case 0xDD: - case 0xDE: - case 0xDF: - { - if (JSON_HEDLEY_UNLIKELY(!next_byte_in_range({0x80, 0xBF}))) - { - return token_type::parse_error; - } - break; - } - - // U+0800..U+0FFF: bytes E0 A0..BF 80..BF - case 0xE0: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF - // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF - case 0xE1: - case 0xE2: - case 0xE3: - case 0xE4: - case 0xE5: - case 0xE6: - case 0xE7: - case 0xE8: - case 0xE9: - case 0xEA: - case 0xEB: - case 0xEC: - case 0xEE: - case 0xEF: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // U+D000..U+D7FF: bytes ED 80..9F 80..BF - case 0xED: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x9F, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // 
U+10000..U+3FFFF F0 90..BF 80..BF 80..BF - case 0xF0: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // U+40000..U+FFFFF F1..F3 80..BF 80..BF 80..BF - case 0xF1: - case 0xF2: - case 0xF3: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF - case 0xF4: - { - if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF})))) - { - return token_type::parse_error; - } - break; - } - - // remaining bytes (80..C1 and F5..FF) are ill-formed - default: - { - error_message = "invalid string: ill-formed UTF-8 byte"; - return token_type::parse_error; - } - } - } - } - - /*! - * @brief scan a comment - * @return whether comment could be scanned successfully - */ - bool scan_comment() - { - switch (get()) - { - // single-line comments skip input until a newline or EOF is read - case '/': - { - while (true) - { - switch (get()) - { - case '\n': - case '\r': - case std::char_traits::eof(): - case '\0': - return true; - - default: - break; - } - } - } - - // multi-line comments skip input until */ is read - case '*': - { - while (true) - { - switch (get()) - { - case std::char_traits::eof(): - case '\0': - { - error_message = "invalid comment; missing closing '*/'"; - return false; - } - - case '*': - { - switch (get()) - { - case '/': - return true; - - default: - { - unget(); - continue; - } - } - } - - default: - continue; - } - } - } - - // unexpected character after reading '/' - default: - { - error_message = "invalid comment; expecting '/' or '*' after '/'"; - return false; - } - } - } - - JSON_HEDLEY_NON_NULL(2) - static void strtof(float& f, const char* str, char** endptr) noexcept - { - f = std::strtof(str, endptr); - } - - JSON_HEDLEY_NON_NULL(2) - static void strtof(double& f, const char* str, char** 
endptr) noexcept - { - f = std::strtod(str, endptr); - } - - JSON_HEDLEY_NON_NULL(2) - static void strtof(long double& f, const char* str, char** endptr) noexcept - { - f = std::strtold(str, endptr); - } - - /*! - @brief scan a number literal - - This function scans a string according to Sect. 6 of RFC 7159. - - The function is realized with a deterministic finite state machine derived - from the grammar described in RFC 7159. Starting in state "init", the - input is read and used to determined the next state. Only state "done" - accepts the number. State "error" is a trap state to model errors. In the - table below, "anything" means any character but the ones listed before. - - state | 0 | 1-9 | e E | + | - | . | anything - ---------|----------|----------|----------|---------|---------|----------|----------- - init | zero | any1 | [error] | [error] | minus | [error] | [error] - minus | zero | any1 | [error] | [error] | [error] | [error] | [error] - zero | done | done | exponent | done | done | decimal1 | done - any1 | any1 | any1 | exponent | done | done | decimal1 | done - decimal1 | decimal2 | decimal2 | [error] | [error] | [error] | [error] | [error] - decimal2 | decimal2 | decimal2 | exponent | done | done | done | done - exponent | any2 | any2 | [error] | sign | sign | [error] | [error] - sign | any2 | any2 | [error] | [error] | [error] | [error] | [error] - any2 | any2 | any2 | done | done | done | done | done - - The state machine is realized with one label per state (prefixed with - "scan_number_") and `goto` statements between them. The state machine - contains cycles, but any cycle can be left when EOF is read. Therefore, - the function is guaranteed to terminate. - - During scanning, the read bytes are stored in token_buffer. This string is - then converted to a signed integer, an unsigned integer, or a - floating-point number. 
- - @return token_type::value_unsigned, token_type::value_integer, or - token_type::value_float if number could be successfully scanned, - token_type::parse_error otherwise - - @note The scanner is independent of the current locale. Internally, the - locale's decimal point is used instead of `.` to work with the - locale-dependent converters. - */ - token_type scan_number() // lgtm [cpp/use-of-goto] - { - // reset token_buffer to store the number's bytes - reset(); - - // the type of the parsed number; initially set to unsigned; will be - // changed if minus sign, decimal point or exponent is read - token_type number_type = token_type::value_unsigned; - - // state (init): we just found out we need to scan a number - switch (current) - { - case '-': - { - add(current); - goto scan_number_minus; - } - - case '0': - { - add(current); - goto scan_number_zero; - } - - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_any1; - } - - // all other characters are rejected outside scan_number() - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - -scan_number_minus: - // state: we just parsed a leading minus sign - number_type = token_type::value_integer; - switch (get()) - { - case '0': - { - add(current); - goto scan_number_zero; - } - - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_any1; - } - - default: - { - error_message = "invalid number; expected digit after '-'"; - return token_type::parse_error; - } - } - -scan_number_zero: - // state: we just parse a zero (maybe with a leading minus sign) - switch (get()) - { - case '.': - { - add(decimal_point_char); - goto scan_number_decimal1; - } - - case 'e': - case 'E': - { - add(current); - goto scan_number_exponent; - } - - default: - goto scan_number_done; - } - -scan_number_any1: - // state: we just parsed a 
number 0-9 (maybe with a leading minus sign) - switch (get()) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_any1; - } - - case '.': - { - add(decimal_point_char); - goto scan_number_decimal1; - } - - case 'e': - case 'E': - { - add(current); - goto scan_number_exponent; - } - - default: - goto scan_number_done; - } - -scan_number_decimal1: - // state: we just parsed a decimal point - number_type = token_type::value_float; - switch (get()) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_decimal2; - } - - default: - { - error_message = "invalid number; expected digit after '.'"; - return token_type::parse_error; - } - } - -scan_number_decimal2: - // we just parsed at least one number after a decimal point - switch (get()) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_decimal2; - } - - case 'e': - case 'E': - { - add(current); - goto scan_number_exponent; - } - - default: - goto scan_number_done; - } - -scan_number_exponent: - // we just parsed an exponent - number_type = token_type::value_float; - switch (get()) - { - case '+': - case '-': - { - add(current); - goto scan_number_sign; - } - - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_any2; - } - - default: - { - error_message = - "invalid number; expected '+', '-', or digit after exponent"; - return token_type::parse_error; - } - } - -scan_number_sign: - // we just parsed an exponent sign - switch (get()) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto 
scan_number_any2; - } - - default: - { - error_message = "invalid number; expected digit after exponent sign"; - return token_type::parse_error; - } - } - -scan_number_any2: - // we just parsed a number after the exponent or exponent sign - switch (get()) - { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - { - add(current); - goto scan_number_any2; - } - - default: - goto scan_number_done; - } - -scan_number_done: - // unget the character after the number (we only read it to know that - // we are done scanning a number) - unget(); - - char* endptr = nullptr; - errno = 0; - - // try to parse integers first and fall back to floats - if (number_type == token_type::value_unsigned) - { - const auto x = std::strtoull(token_buffer.data(), &endptr, 10); - - // we checked the number format before - JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - - if (errno == 0) - { - value_unsigned = static_cast(x); - if (value_unsigned == x) - { - return token_type::value_unsigned; - } - } - } - else if (number_type == token_type::value_integer) - { - const auto x = std::strtoll(token_buffer.data(), &endptr, 10); - - // we checked the number format before - JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - - if (errno == 0) - { - value_integer = static_cast(x); - if (value_integer == x) - { - return token_type::value_integer; - } - } - } - - // this code is reached if we parse a floating-point number or if an - // integer conversion above failed - strtof(value_float, token_buffer.data(), &endptr); - - // we checked the number format before - JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - - return token_type::value_float; - } - - /*! 
- @param[in] literal_text the literal text to expect - @param[in] length the length of the passed literal text - @param[in] return_type the token type to return on success - */ - JSON_HEDLEY_NON_NULL(2) - token_type scan_literal(const char_type* literal_text, const std::size_t length, - token_type return_type) - { - JSON_ASSERT(std::char_traits::to_char_type(current) == literal_text[0]); - for (std::size_t i = 1; i < length; ++i) - { - if (JSON_HEDLEY_UNLIKELY(std::char_traits::to_char_type(get()) != literal_text[i])) - { - error_message = "invalid literal"; - return token_type::parse_error; - } - } - return return_type; - } - - ///////////////////// - // input management - ///////////////////// - - /// reset token_buffer; current character is beginning of token - void reset() noexcept - { - token_buffer.clear(); - token_string.clear(); - token_string.push_back(std::char_traits::to_char_type(current)); - } - - /* - @brief get next character from the input - - This function provides the interface to the used input adapter. It does - not throw in case the input reached EOF, but returns a - `std::char_traits::eof()` in that case. Stores the scanned characters - for use in error messages. - - @return character read from the input - */ - char_int_type get() - { - ++position.chars_read_total; - ++position.chars_read_current_line; - - if (next_unget) - { - // just reset the next_unget variable and work with current - next_unget = false; - } - else - { - current = ia.get_character(); - } - - if (JSON_HEDLEY_LIKELY(current != std::char_traits::eof())) - { - token_string.push_back(std::char_traits::to_char_type(current)); - } - - if (current == '\n') - { - ++position.lines_read; - position.chars_read_current_line = 0; - } - - return current; - } - - /*! - @brief unget current character (read it again on next get) - - We implement unget by setting variable next_unget to true. 
The input is not - changed - we just simulate ungetting by modifying chars_read_total, - chars_read_current_line, and token_string. The next call to get() will - behave as if the unget character is read again. - */ - void unget() - { - next_unget = true; - - --position.chars_read_total; - - // in case we "unget" a newline, we have to also decrement the lines_read - if (position.chars_read_current_line == 0) - { - if (position.lines_read > 0) - { - --position.lines_read; - } - } - else - { - --position.chars_read_current_line; - } - - if (JSON_HEDLEY_LIKELY(current != std::char_traits::eof())) - { - JSON_ASSERT(!token_string.empty()); - token_string.pop_back(); - } - } - - /// add a character to token_buffer - void add(char_int_type c) - { - token_buffer.push_back(static_cast(c)); - } - - public: - ///////////////////// - // value getters - ///////////////////// - - /// return integer value - constexpr number_integer_t get_number_integer() const noexcept - { - return value_integer; - } - - /// return unsigned integer value - constexpr number_unsigned_t get_number_unsigned() const noexcept - { - return value_unsigned; - } - - /// return floating-point value - constexpr number_float_t get_number_float() const noexcept - { - return value_float; - } - - /// return current string value (implicitly resets the token; useful only once) - string_t& get_string() - { - return token_buffer; - } - - ///////////////////// - // diagnostics - ///////////////////// - - /// return position of last read token - constexpr position_t get_position() const noexcept - { - return position; - } - - /// return the last read token (for errors only). Will never contain EOF - /// (an arbitrary value that is not a valid char value, often -1), because - /// 255 may legitimately occur. May contain NUL, which should be escaped. 
- std::string get_token_string() const - { - // escape control characters - std::string result; - for (const auto c : token_string) - { - if (static_cast(c) <= '\x1F') - { - // escape control characters - std::array cs{{}}; - (std::snprintf)(cs.data(), cs.size(), "", static_cast(c)); - result += cs.data(); - } - else - { - // add character as is - result.push_back(static_cast(c)); - } - } - - return result; - } - - /// return syntax error message - JSON_HEDLEY_RETURNS_NON_NULL - constexpr const char* get_error_message() const noexcept - { - return error_message; - } - - ///////////////////// - // actual scanner - ///////////////////// - - /*! - @brief skip the UTF-8 byte order mark - @return true iff there is no BOM or the correct BOM has been skipped - */ - bool skip_bom() - { - if (get() == 0xEF) - { - // check if we completely parse the BOM - return get() == 0xBB && get() == 0xBF; - } - - // the first character is not the beginning of the BOM; unget it to - // process is later - unget(); - return true; - } - - void skip_whitespace() - { - do - { - get(); - } - while (current == ' ' || current == '\t' || current == '\n' || current == '\r'); - } - - token_type scan() - { - // initially, skip the BOM - if (position.chars_read_total == 0 && !skip_bom()) - { - error_message = "invalid BOM; must be 0xEF 0xBB 0xBF if given"; - return token_type::parse_error; - } - - // read next character and ignore whitespace - skip_whitespace(); - - // ignore comments - while (ignore_comments && current == '/') - { - if (!scan_comment()) - { - return token_type::parse_error; - } - - // skip following whitespace - skip_whitespace(); - } - - switch (current) - { - // structural characters - case '[': - return token_type::begin_array; - case ']': - return token_type::end_array; - case '{': - return token_type::begin_object; - case '}': - return token_type::end_object; - case ':': - return token_type::name_separator; - case ',': - return token_type::value_separator; - - // literals - 
case 't': - { - std::array true_literal = {{char_type('t'), char_type('r'), char_type('u'), char_type('e')}}; - return scan_literal(true_literal.data(), true_literal.size(), token_type::literal_true); - } - case 'f': - { - std::array false_literal = {{char_type('f'), char_type('a'), char_type('l'), char_type('s'), char_type('e')}}; - return scan_literal(false_literal.data(), false_literal.size(), token_type::literal_false); - } - case 'n': - { - std::array null_literal = {{char_type('n'), char_type('u'), char_type('l'), char_type('l')}}; - return scan_literal(null_literal.data(), null_literal.size(), token_type::literal_null); - } - - // string - case '\"': - return scan_string(); - - // number - case '-': - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - return scan_number(); - - // end of input (the null byte is needed when parsing from - // string literals) - case '\0': - case std::char_traits::eof(): - return token_type::end_of_input; - - // error - default: - error_message = "invalid literal"; - return token_type::parse_error; - } - } - - private: - /// input adapter - InputAdapterType ia; - - /// whether comments should be ignored (true) or signaled as errors (false) - const bool ignore_comments = false; - - /// the current character - char_int_type current = std::char_traits::eof(); - - /// whether the next get() call should just return current - bool next_unget = false; - - /// the start position of the current token - position_t position {}; - - /// raw input token string (for error messages) - std::vector token_string {}; - - /// buffer for variable-length tokens (numbers, strings) - string_t token_buffer {}; - - /// a description of occurred lexer errors - const char* error_message = ""; - - // number values - number_integer_t value_integer = 0; - number_unsigned_t value_unsigned = 0; - number_float_t value_float = 0; - - /// the decimal point - const char_int_type decimal_point_char 
= '.'; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - - -#include // size_t -#include // declval -#include // string - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -using null_function_t = decltype(std::declval().null()); - -template -using boolean_function_t = - decltype(std::declval().boolean(std::declval())); - -template -using number_integer_function_t = - decltype(std::declval().number_integer(std::declval())); - -template -using number_unsigned_function_t = - decltype(std::declval().number_unsigned(std::declval())); - -template -using number_float_function_t = decltype(std::declval().number_float( - std::declval(), std::declval())); - -template -using string_function_t = - decltype(std::declval().string(std::declval())); - -template -using binary_function_t = - decltype(std::declval().binary(std::declval())); - -template -using start_object_function_t = - decltype(std::declval().start_object(std::declval())); - -template -using key_function_t = - decltype(std::declval().key(std::declval())); - -template -using end_object_function_t = decltype(std::declval().end_object()); - -template -using start_array_function_t = - decltype(std::declval().start_array(std::declval())); - -template -using end_array_function_t = decltype(std::declval().end_array()); - -template -using parse_error_function_t = decltype(std::declval().parse_error( - std::declval(), std::declval(), - std::declval())); - -template -struct is_sax -{ - private: - static_assert(is_basic_json::value, - "BasicJsonType must be of type basic_json<...>"); - - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using exception_t = typename BasicJsonType::exception; - - 
public: - static constexpr bool value = - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value && - is_detected_exact::value; -}; - -template -struct is_sax_static_asserts -{ - private: - static_assert(is_basic_json::value, - "BasicJsonType must be of type basic_json<...>"); - - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using exception_t = typename BasicJsonType::exception; - - public: - static_assert(is_detected_exact::value, - "Missing/invalid function: bool null()"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool boolean(bool)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool boolean(bool)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool number_integer(number_integer_t)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool number_unsigned(number_unsigned_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool number_float(number_float_t, const string_t&)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool string(string_t&)"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool binary(binary_t&)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool start_object(std::size_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool key(string_t&)"); - 
static_assert(is_detected_exact::value, - "Missing/invalid function: bool end_object()"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool start_array(std::size_t)"); - static_assert(is_detected_exact::value, - "Missing/invalid function: bool end_array()"); - static_assert( - is_detected_exact::value, - "Missing/invalid function: bool parse_error(std::size_t, const " - "std::string&, const exception&)"); -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -namespace nlohmann -{ -namespace detail -{ - -/// how to treat CBOR tags -enum class cbor_tag_handler_t -{ - error, ///< throw a parse_error exception in case of a tag - ignore ///< ignore tags -}; - -/*! -@brief determine system byte order - -@return true if and only if system's byte order is little endian - -@note from https://stackoverflow.com/a/1001328/266378 -*/ -static inline bool little_endianess(int num = 1) noexcept -{ - return *reinterpret_cast(&num) == 1; -} - - -/////////////////// -// binary reader // -/////////////////// - -/*! -@brief deserialization of CBOR, MessagePack, and UBJSON values -*/ -template> -class binary_reader -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using json_sax_t = SAX; - using char_type = typename InputAdapterType::char_type; - using char_int_type = typename std::char_traits::int_type; - - public: - /*! 
- @brief create a binary reader - - @param[in] adapter input adapter to read from - */ - explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter)) - { - (void)detail::is_sax_static_asserts {}; - } - - // make class move-only - binary_reader(const binary_reader&) = delete; - binary_reader(binary_reader&&) = default; - binary_reader& operator=(const binary_reader&) = delete; - binary_reader& operator=(binary_reader&&) = default; - ~binary_reader() = default; - - /*! - @param[in] format the binary format to parse - @param[in] sax_ a SAX event processor - @param[in] strict whether to expect the input to be consumed completed - @param[in] tag_handler how to treat CBOR tags - - @return - */ - JSON_HEDLEY_NON_NULL(3) - bool sax_parse(const input_format_t format, - json_sax_t* sax_, - const bool strict = true, - const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) - { - sax = sax_; - bool result = false; - - switch (format) - { - case input_format_t::bson: - result = parse_bson_internal(); - break; - - case input_format_t::cbor: - result = parse_cbor_internal(true, tag_handler); - break; - - case input_format_t::msgpack: - result = parse_msgpack_internal(); - break; - - case input_format_t::ubjson: - result = parse_ubjson_internal(); - break; - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - - // strict mode: next byte must be EOF - if (result && strict) - { - if (format == input_format_t::ubjson) - { - get_ignore_noop(); - } - else - { - get(); - } - - if (JSON_HEDLEY_UNLIKELY(current != std::char_traits::eof())) - { - return sax->parse_error(chars_read, get_token_string(), - parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value"), BasicJsonType())); - } - } - - return result; - } - - private: - ////////// - // BSON // - ////////// - - /*! - @brief Reads in a BSON-object and passes it to the SAX-parser. 
- @return whether a valid BSON-value was passed to the SAX parser - */ - bool parse_bson_internal() - { - std::int32_t document_size{}; - get_number(input_format_t::bson, document_size); - - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/false))) - { - return false; - } - - return sax->end_object(); - } - - /*! - @brief Parses a C-style string from the BSON input. - @param[in, out] result A reference to the string variable where the read - string is to be stored. - @return `true` if the \x00-byte indicating the end of the string was - encountered before the EOF; false` indicates an unexpected EOF. - */ - bool get_bson_cstr(string_t& result) - { - auto out = std::back_inserter(result); - while (true) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "cstring"))) - { - return false; - } - if (current == 0x00) - { - return true; - } - *out++ = static_cast(current); - } - } - - /*! - @brief Parses a zero-terminated string of length @a len from the BSON - input. - @param[in] len The length (including the zero-byte at the end) of the - string to be read. - @param[in, out] result A reference to the string variable where the read - string is to be stored. - @tparam NumberType The type of the length @a len - @pre len >= 1 - @return `true` if the string was successfully parsed - */ - template - bool get_bson_string(const NumberType len, string_t& result) - { - if (JSON_HEDLEY_UNLIKELY(len < 1)) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string"), BasicJsonType())); - } - - return get_string(input_format_t::bson, len - static_cast(1), result) && get() != std::char_traits::eof(); - } - - /*! 
- @brief Parses a byte array input of length @a len from the BSON input. - @param[in] len The length of the byte array to be read. - @param[in, out] result A reference to the binary variable where the read - array is to be stored. - @tparam NumberType The type of the length @a len - @pre len >= 0 - @return `true` if the byte array was successfully parsed - */ - template - bool get_bson_binary(const NumberType len, binary_t& result) - { - if (JSON_HEDLEY_UNLIKELY(len < 0)) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "byte array length cannot be negative, is " + std::to_string(len), "binary"), BasicJsonType())); - } - - // All BSON binary values have a subtype - std::uint8_t subtype{}; - get_number(input_format_t::bson, subtype); - result.set_subtype(subtype); - - return get_binary(input_format_t::bson, len, result); - } - - /*! - @brief Read a BSON document element of the given @a element_type. - @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html - @param[in] element_type_parse_position The position in the input stream, - where the `element_type` was read. - @warning Not all BSON element types are supported yet. An unsupported - @a element_type will give rise to a parse_error.114: - Unsupported BSON record type 0x... 
- @return whether a valid BSON-object/array was passed to the SAX parser - */ - bool parse_bson_element_internal(const char_int_type element_type, - const std::size_t element_type_parse_position) - { - switch (element_type) - { - case 0x01: // double - { - double number{}; - return get_number(input_format_t::bson, number) && sax->number_float(static_cast(number), ""); - } - - case 0x02: // string - { - std::int32_t len{}; - string_t value; - return get_number(input_format_t::bson, len) && get_bson_string(len, value) && sax->string(value); - } - - case 0x03: // object - { - return parse_bson_internal(); - } - - case 0x04: // array - { - return parse_bson_array(); - } - - case 0x05: // binary - { - std::int32_t len{}; - binary_t value; - return get_number(input_format_t::bson, len) && get_bson_binary(len, value) && sax->binary(value); - } - - case 0x08: // boolean - { - return sax->boolean(get() != 0); - } - - case 0x0A: // null - { - return sax->null(); - } - - case 0x10: // int32 - { - std::int32_t value{}; - return get_number(input_format_t::bson, value) && sax->number_integer(value); - } - - case 0x12: // int64 - { - std::int64_t value{}; - return get_number(input_format_t::bson, value) && sax->number_integer(value); - } - - default: // anything else not supported (yet) - { - std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(element_type)); - return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data()), BasicJsonType())); - } - } - } - - /*! - @brief Read a BSON element list (as specified in the BSON-spec) - - The same binary layout is used for objects and arrays, hence it must be - indicated with the argument @a is_array which one is expected - (true --> array, false --> object). 
- - @param[in] is_array Determines if the element list being read is to be - treated as an object (@a is_array == false), or as an - array (@a is_array == true). - @return whether a valid BSON-object/array was passed to the SAX parser - */ - bool parse_bson_element_list(const bool is_array) - { - string_t key; - - while (auto element_type = get()) - { - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "element list"))) - { - return false; - } - - const std::size_t element_type_parse_position = chars_read; - if (JSON_HEDLEY_UNLIKELY(!get_bson_cstr(key))) - { - return false; - } - - if (!is_array && !sax->key(key)) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_internal(element_type, element_type_parse_position))) - { - return false; - } - - // get_bson_cstr only appends - key.clear(); - } - - return true; - } - - /*! - @brief Reads an array from the BSON input and passes it to the SAX-parser. - @return whether a valid BSON-array was passed to the SAX parser - */ - bool parse_bson_array() - { - std::int32_t document_size{}; - get_number(input_format_t::bson, document_size); - - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/true))) - { - return false; - } - - return sax->end_array(); - } - - ////////// - // CBOR // - ////////// - - /*! - @param[in] get_char whether a new character should be retrieved from the - input (true) or whether the last read character should - be considered instead (false) - @param[in] tag_handler how CBOR tags should be treated - - @return whether a valid CBOR value was passed to the SAX parser - */ - bool parse_cbor_internal(const bool get_char, - const cbor_tag_handler_t tag_handler) - { - switch (get_char ? 
get() : current) - { - // EOF - case std::char_traits::eof(): - return unexpect_eof(input_format_t::cbor, "value"); - - // Integer 0x00..0x17 (0..23) - case 0x00: - case 0x01: - case 0x02: - case 0x03: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x08: - case 0x09: - case 0x0A: - case 0x0B: - case 0x0C: - case 0x0D: - case 0x0E: - case 0x0F: - case 0x10: - case 0x11: - case 0x12: - case 0x13: - case 0x14: - case 0x15: - case 0x16: - case 0x17: - return sax->number_unsigned(static_cast(current)); - - case 0x18: // Unsigned integer (one-byte uint8_t follows) - { - std::uint8_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); - } - - case 0x19: // Unsigned integer (two-byte uint16_t follows) - { - std::uint16_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); - } - - case 0x1A: // Unsigned integer (four-byte uint32_t follows) - { - std::uint32_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); - } - - case 0x1B: // Unsigned integer (eight-byte uint64_t follows) - { - std::uint64_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_unsigned(number); - } - - // Negative integer -1-0x00..-1-0x17 (-1..-24) - case 0x20: - case 0x21: - case 0x22: - case 0x23: - case 0x24: - case 0x25: - case 0x26: - case 0x27: - case 0x28: - case 0x29: - case 0x2A: - case 0x2B: - case 0x2C: - case 0x2D: - case 0x2E: - case 0x2F: - case 0x30: - case 0x31: - case 0x32: - case 0x33: - case 0x34: - case 0x35: - case 0x36: - case 0x37: - return sax->number_integer(static_cast(0x20 - 1 - current)); - - case 0x38: // Negative integer (one-byte uint8_t follows) - { - std::uint8_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) - number); - } - - case 0x39: // Negative integer -1-n (two-byte uint16_t follows) - { - std::uint16_t number{}; - return get_number(input_format_t::cbor, 
number) && sax->number_integer(static_cast(-1) - number); - } - - case 0x3A: // Negative integer -1-n (four-byte uint32_t follows) - { - std::uint32_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) - number); - } - - case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows) - { - std::uint64_t number{}; - return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast(-1) - - static_cast(number)); - } - - // Binary data (0x00..0x17 bytes follow) - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4A: - case 0x4B: - case 0x4C: - case 0x4D: - case 0x4E: - case 0x4F: - case 0x50: - case 0x51: - case 0x52: - case 0x53: - case 0x54: - case 0x55: - case 0x56: - case 0x57: - case 0x58: // Binary data (one-byte uint8_t for n follows) - case 0x59: // Binary data (two-byte uint16_t for n follow) - case 0x5A: // Binary data (four-byte uint32_t for n follow) - case 0x5B: // Binary data (eight-byte uint64_t for n follow) - case 0x5F: // Binary data (indefinite length) - { - binary_t b; - return get_cbor_binary(b) && sax->binary(b); - } - - // UTF-8 string (0x00..0x17 bytes follow) - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - case 0x78: // UTF-8 string (one-byte uint8_t for n follows) - case 0x79: // UTF-8 string (two-byte uint16_t for n follow) - case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) - case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) - case 0x7F: // UTF-8 string (indefinite length) - { - string_t s; - return get_cbor_string(s) && sax->string(s); - } - - // array (0x00..0x17 data items follow) - case 0x80: - case 
0x81: - case 0x82: - case 0x83: - case 0x84: - case 0x85: - case 0x86: - case 0x87: - case 0x88: - case 0x89: - case 0x8A: - case 0x8B: - case 0x8C: - case 0x8D: - case 0x8E: - case 0x8F: - case 0x90: - case 0x91: - case 0x92: - case 0x93: - case 0x94: - case 0x95: - case 0x96: - case 0x97: - return get_cbor_array(static_cast(static_cast(current) & 0x1Fu), tag_handler); - - case 0x98: // array (one-byte uint8_t for n follows) - { - std::uint8_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); - } - - case 0x99: // array (two-byte uint16_t for n follow) - { - std::uint16_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); - } - - case 0x9A: // array (four-byte uint32_t for n follow) - { - std::uint32_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); - } - - case 0x9B: // array (eight-byte uint64_t for n follow) - { - std::uint64_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast(len), tag_handler); - } - - case 0x9F: // array (indefinite length) - return get_cbor_array(std::size_t(-1), tag_handler); - - // map (0x00..0x17 pairs of data items follow) - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - return get_cbor_object(static_cast(static_cast(current) & 0x1Fu), tag_handler); - - case 0xB8: // map (one-byte uint8_t for n follows) - { - std::uint8_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); - } - - case 0xB9: // map (two-byte uint16_t for n follow) - { - std::uint16_t len{}; - return get_number(input_format_t::cbor, len) && 
get_cbor_object(static_cast(len), tag_handler); - } - - case 0xBA: // map (four-byte uint32_t for n follow) - { - std::uint32_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); - } - - case 0xBB: // map (eight-byte uint64_t for n follow) - { - std::uint64_t len{}; - return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast(len), tag_handler); - } - - case 0xBF: // map (indefinite length) - return get_cbor_object(std::size_t(-1), tag_handler); - - case 0xC6: // tagged item - case 0xC7: - case 0xC8: - case 0xC9: - case 0xCA: - case 0xCB: - case 0xCC: - case 0xCD: - case 0xCE: - case 0xCF: - case 0xD0: - case 0xD1: - case 0xD2: - case 0xD3: - case 0xD4: - case 0xD8: // tagged item (1 bytes follow) - case 0xD9: // tagged item (2 bytes follow) - case 0xDA: // tagged item (4 bytes follow) - case 0xDB: // tagged item (8 bytes follow) - { - switch (tag_handler) - { - case cbor_tag_handler_t::error: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); - } - - case cbor_tag_handler_t::ignore: - { - switch (current) - { - case 0xD8: - { - std::uint8_t len{}; - get_number(input_format_t::cbor, len); - break; - } - case 0xD9: - { - std::uint16_t len{}; - get_number(input_format_t::cbor, len); - break; - } - case 0xDA: - { - std::uint32_t len{}; - get_number(input_format_t::cbor, len); - break; - } - case 0xDB: - { - std::uint64_t len{}; - get_number(input_format_t::cbor, len); - break; - } - default: - break; - } - return parse_cbor_internal(true, tag_handler); - } - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - return false; // LCOV_EXCL_LINE - } - } - - case 0xF4: // false - return sax->boolean(false); - - case 0xF5: // true - return sax->boolean(true); - - case 0xF6: // null - return sax->null(); - - 
case 0xF9: // Half-Precision Float (two-byte IEEE 754) - { - const auto byte1_raw = get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number"))) - { - return false; - } - const auto byte2_raw = get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number"))) - { - return false; - } - - const auto byte1 = static_cast(byte1_raw); - const auto byte2 = static_cast(byte2_raw); - - // code from RFC 7049, Appendix D, Figure 3: - // As half-precision floating-point numbers were only added - // to IEEE 754 in 2008, today's programming platforms often - // still only have limited support for them. It is very - // easy to include at least decoding support for them even - // without such support. An example of a small decoder for - // half-precision floating-point numbers in the C language - // is shown in Fig. 3. - const auto half = static_cast((byte1 << 8u) + byte2); - const double val = [&half] - { - const int exp = (half >> 10u) & 0x1Fu; - const unsigned int mant = half & 0x3FFu; - JSON_ASSERT(0 <= exp&& exp <= 32); - JSON_ASSERT(mant <= 1024); - switch (exp) - { - case 0: - return std::ldexp(mant, -24); - case 31: - return (mant == 0) - ? std::numeric_limits::infinity() - : std::numeric_limits::quiet_NaN(); - default: - return std::ldexp(mant + 1024, exp - 25); - } - }(); - return sax->number_float((half & 0x8000u) != 0 - ? 
static_cast(-val) - : static_cast(val), ""); - } - - case 0xFA: // Single-Precision Float (four-byte IEEE 754) - { - float number{}; - return get_number(input_format_t::cbor, number) && sax->number_float(static_cast(number), ""); - } - - case 0xFB: // Double-Precision Float (eight-byte IEEE 754) - { - double number{}; - return get_number(input_format_t::cbor, number) && sax->number_float(static_cast(number), ""); - } - - default: // anything else (0xFF is handled inside the other types) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); - } - } - } - - /*! - @brief reads a CBOR string - - This function first reads starting bytes to determine the expected - string length and then copies this number of bytes into a string. - Additionally, CBOR's strings with indefinite lengths are supported. - - @param[out] result created string - - @return whether string creation completed - */ - bool get_cbor_string(string_t& result) - { - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "string"))) - { - return false; - } - - switch (current) - { - // UTF-8 string (0x00..0x17 bytes follow) - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - { - return get_string(input_format_t::cbor, static_cast(current) & 0x1Fu, result); - } - - case 0x78: // UTF-8 string (one-byte uint8_t for n follows) - { - std::uint8_t len{}; - return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); - } - - case 0x79: // UTF-8 string (two-byte uint16_t for n follow) - { - std::uint16_t len{}; - return 
get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); - } - - case 0x7A: // UTF-8 string (four-byte uint32_t for n follow) - { - std::uint32_t len{}; - return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); - } - - case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow) - { - std::uint64_t len{}; - return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result); - } - - case 0x7F: // UTF-8 string (indefinite length) - { - while (get() != 0xFF) - { - string_t chunk; - if (!get_cbor_string(chunk)) - { - return false; - } - result.append(chunk); - } - return true; - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string"), BasicJsonType())); - } - } - } - - /*! - @brief reads a CBOR byte array - - This function first reads starting bytes to determine the expected - byte array length and then copies this number of bytes into the byte array. - Additionally, CBOR's byte arrays with indefinite lengths are supported. 
- - @param[out] result created byte array - - @return whether byte array creation completed - */ - bool get_cbor_binary(binary_t& result) - { - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "binary"))) - { - return false; - } - - switch (current) - { - // Binary data (0x00..0x17 bytes follow) - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4A: - case 0x4B: - case 0x4C: - case 0x4D: - case 0x4E: - case 0x4F: - case 0x50: - case 0x51: - case 0x52: - case 0x53: - case 0x54: - case 0x55: - case 0x56: - case 0x57: - { - return get_binary(input_format_t::cbor, static_cast(current) & 0x1Fu, result); - } - - case 0x58: // Binary data (one-byte uint8_t for n follows) - { - std::uint8_t len{}; - return get_number(input_format_t::cbor, len) && - get_binary(input_format_t::cbor, len, result); - } - - case 0x59: // Binary data (two-byte uint16_t for n follow) - { - std::uint16_t len{}; - return get_number(input_format_t::cbor, len) && - get_binary(input_format_t::cbor, len, result); - } - - case 0x5A: // Binary data (four-byte uint32_t for n follow) - { - std::uint32_t len{}; - return get_number(input_format_t::cbor, len) && - get_binary(input_format_t::cbor, len, result); - } - - case 0x5B: // Binary data (eight-byte uint64_t for n follow) - { - std::uint64_t len{}; - return get_number(input_format_t::cbor, len) && - get_binary(input_format_t::cbor, len, result); - } - - case 0x5F: // Binary data (indefinite length) - { - while (get() != 0xFF) - { - binary_t chunk; - if (!get_cbor_binary(chunk)) - { - return false; - } - result.insert(result.end(), chunk.begin(), chunk.end()); - } - return true; - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x40-0x5B) or indefinite binary array type (0x5F); last byte: 
0x" + last_token, "binary"), BasicJsonType())); - } - } - } - - /*! - @param[in] len the length of the array or std::size_t(-1) for an - array of indefinite size - @param[in] tag_handler how CBOR tags should be treated - @return whether array creation completed - */ - bool get_cbor_array(const std::size_t len, - const cbor_tag_handler_t tag_handler) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len))) - { - return false; - } - - if (len != std::size_t(-1)) - { - for (std::size_t i = 0; i < len; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) - { - return false; - } - } - } - else - { - while (get() != 0xFF) - { - if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(false, tag_handler))) - { - return false; - } - } - } - - return sax->end_array(); - } - - /*! - @param[in] len the length of the object or std::size_t(-1) for an - object of indefinite size - @param[in] tag_handler how CBOR tags should be treated - @return whether object creation completed - */ - bool get_cbor_object(const std::size_t len, - const cbor_tag_handler_t tag_handler) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len))) - { - return false; - } - - string_t key; - if (len != std::size_t(-1)) - { - for (std::size_t i = 0; i < len; ++i) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key))) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) - { - return false; - } - key.clear(); - } - } - else - { - while (get() != 0xFF) - { - if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key))) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler))) - { - return false; - } - key.clear(); - } - } - - return sax->end_object(); - } - - ///////////// - // MsgPack // - ///////////// - - /*! 
- @return whether a valid MessagePack value was passed to the SAX parser - */ - bool parse_msgpack_internal() - { - switch (get()) - { - // EOF - case std::char_traits::eof(): - return unexpect_eof(input_format_t::msgpack, "value"); - - // positive fixint - case 0x00: - case 0x01: - case 0x02: - case 0x03: - case 0x04: - case 0x05: - case 0x06: - case 0x07: - case 0x08: - case 0x09: - case 0x0A: - case 0x0B: - case 0x0C: - case 0x0D: - case 0x0E: - case 0x0F: - case 0x10: - case 0x11: - case 0x12: - case 0x13: - case 0x14: - case 0x15: - case 0x16: - case 0x17: - case 0x18: - case 0x19: - case 0x1A: - case 0x1B: - case 0x1C: - case 0x1D: - case 0x1E: - case 0x1F: - case 0x20: - case 0x21: - case 0x22: - case 0x23: - case 0x24: - case 0x25: - case 0x26: - case 0x27: - case 0x28: - case 0x29: - case 0x2A: - case 0x2B: - case 0x2C: - case 0x2D: - case 0x2E: - case 0x2F: - case 0x30: - case 0x31: - case 0x32: - case 0x33: - case 0x34: - case 0x35: - case 0x36: - case 0x37: - case 0x38: - case 0x39: - case 0x3A: - case 0x3B: - case 0x3C: - case 0x3D: - case 0x3E: - case 0x3F: - case 0x40: - case 0x41: - case 0x42: - case 0x43: - case 0x44: - case 0x45: - case 0x46: - case 0x47: - case 0x48: - case 0x49: - case 0x4A: - case 0x4B: - case 0x4C: - case 0x4D: - case 0x4E: - case 0x4F: - case 0x50: - case 0x51: - case 0x52: - case 0x53: - case 0x54: - case 0x55: - case 0x56: - case 0x57: - case 0x58: - case 0x59: - case 0x5A: - case 0x5B: - case 0x5C: - case 0x5D: - case 0x5E: - case 0x5F: - case 0x60: - case 0x61: - case 0x62: - case 0x63: - case 0x64: - case 0x65: - case 0x66: - case 0x67: - case 0x68: - case 0x69: - case 0x6A: - case 0x6B: - case 0x6C: - case 0x6D: - case 0x6E: - case 0x6F: - case 0x70: - case 0x71: - case 0x72: - case 0x73: - case 0x74: - case 0x75: - case 0x76: - case 0x77: - case 0x78: - case 0x79: - case 0x7A: - case 0x7B: - case 0x7C: - case 0x7D: - case 0x7E: - case 0x7F: - return sax->number_unsigned(static_cast(current)); - - // fixmap - case 0x80: 
- case 0x81: - case 0x82: - case 0x83: - case 0x84: - case 0x85: - case 0x86: - case 0x87: - case 0x88: - case 0x89: - case 0x8A: - case 0x8B: - case 0x8C: - case 0x8D: - case 0x8E: - case 0x8F: - return get_msgpack_object(static_cast(static_cast(current) & 0x0Fu)); - - // fixarray - case 0x90: - case 0x91: - case 0x92: - case 0x93: - case 0x94: - case 0x95: - case 0x96: - case 0x97: - case 0x98: - case 0x99: - case 0x9A: - case 0x9B: - case 0x9C: - case 0x9D: - case 0x9E: - case 0x9F: - return get_msgpack_array(static_cast(static_cast(current) & 0x0Fu)); - - // fixstr - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - case 0xB8: - case 0xB9: - case 0xBA: - case 0xBB: - case 0xBC: - case 0xBD: - case 0xBE: - case 0xBF: - case 0xD9: // str 8 - case 0xDA: // str 16 - case 0xDB: // str 32 - { - string_t s; - return get_msgpack_string(s) && sax->string(s); - } - - case 0xC0: // nil - return sax->null(); - - case 0xC2: // false - return sax->boolean(false); - - case 0xC3: // true - return sax->boolean(true); - - case 0xC4: // bin 8 - case 0xC5: // bin 16 - case 0xC6: // bin 32 - case 0xC7: // ext 8 - case 0xC8: // ext 16 - case 0xC9: // ext 32 - case 0xD4: // fixext 1 - case 0xD5: // fixext 2 - case 0xD6: // fixext 4 - case 0xD7: // fixext 8 - case 0xD8: // fixext 16 - { - binary_t b; - return get_msgpack_binary(b) && sax->binary(b); - } - - case 0xCA: // float 32 - { - float number{}; - return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast(number), ""); - } - - case 0xCB: // float 64 - { - double number{}; - return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast(number), ""); - } - - case 0xCC: // uint 8 - { - std::uint8_t number{}; - return 
get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); - } - - case 0xCD: // uint 16 - { - std::uint16_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); - } - - case 0xCE: // uint 32 - { - std::uint32_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); - } - - case 0xCF: // uint 64 - { - std::uint64_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number); - } - - case 0xD0: // int 8 - { - std::int8_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_integer(number); - } - - case 0xD1: // int 16 - { - std::int16_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_integer(number); - } - - case 0xD2: // int 32 - { - std::int32_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_integer(number); - } - - case 0xD3: // int 64 - { - std::int64_t number{}; - return get_number(input_format_t::msgpack, number) && sax->number_integer(number); - } - - case 0xDC: // array 16 - { - std::uint16_t len{}; - return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast(len)); - } - - case 0xDD: // array 32 - { - std::uint32_t len{}; - return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast(len)); - } - - case 0xDE: // map 16 - { - std::uint16_t len{}; - return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast(len)); - } - - case 0xDF: // map 32 - { - std::uint32_t len{}; - return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast(len)); - } - - // negative fixint - case 0xE0: - case 0xE1: - case 0xE2: - case 0xE3: - case 0xE4: - case 0xE5: - case 0xE6: - case 0xE7: - case 0xE8: - case 0xE9: - case 0xEA: - case 0xEB: - case 0xEC: - case 0xED: - case 0xEE: - case 0xEF: - case 0xF0: - case 0xF1: - case 0xF2: - case 0xF3: - case 0xF4: - case 0xF5: - case 
0xF6: - case 0xF7: - case 0xF8: - case 0xF9: - case 0xFA: - case 0xFB: - case 0xFC: - case 0xFD: - case 0xFE: - case 0xFF: - return sax->number_integer(static_cast(current)); - - default: // anything else - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); - } - } - } - - /*! - @brief reads a MessagePack string - - This function first reads starting bytes to determine the expected - string length and then copies this number of bytes into a string. - - @param[out] result created string - - @return whether string creation completed - */ - bool get_msgpack_string(string_t& result) - { - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::msgpack, "string"))) - { - return false; - } - - switch (current) - { - // fixstr - case 0xA0: - case 0xA1: - case 0xA2: - case 0xA3: - case 0xA4: - case 0xA5: - case 0xA6: - case 0xA7: - case 0xA8: - case 0xA9: - case 0xAA: - case 0xAB: - case 0xAC: - case 0xAD: - case 0xAE: - case 0xAF: - case 0xB0: - case 0xB1: - case 0xB2: - case 0xB3: - case 0xB4: - case 0xB5: - case 0xB6: - case 0xB7: - case 0xB8: - case 0xB9: - case 0xBA: - case 0xBB: - case 0xBC: - case 0xBD: - case 0xBE: - case 0xBF: - { - return get_string(input_format_t::msgpack, static_cast(current) & 0x1Fu, result); - } - - case 0xD9: // str 8 - { - std::uint8_t len{}; - return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); - } - - case 0xDA: // str 16 - { - std::uint16_t len{}; - return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); - } - - case 0xDB: // str 32 - { - std::uint32_t len{}; - return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result); - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, 
last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string"), BasicJsonType())); - } - } - } - - /*! - @brief reads a MessagePack byte array - - This function first reads starting bytes to determine the expected - byte array length and then copies this number of bytes into a byte array. - - @param[out] result created byte array - - @return whether byte array creation completed - */ - bool get_msgpack_binary(binary_t& result) - { - // helper function to set the subtype - auto assign_and_return_true = [&result](std::int8_t subtype) - { - result.set_subtype(static_cast(subtype)); - return true; - }; - - switch (current) - { - case 0xC4: // bin 8 - { - std::uint8_t len{}; - return get_number(input_format_t::msgpack, len) && - get_binary(input_format_t::msgpack, len, result); - } - - case 0xC5: // bin 16 - { - std::uint16_t len{}; - return get_number(input_format_t::msgpack, len) && - get_binary(input_format_t::msgpack, len, result); - } - - case 0xC6: // bin 32 - { - std::uint32_t len{}; - return get_number(input_format_t::msgpack, len) && - get_binary(input_format_t::msgpack, len, result); - } - - case 0xC7: // ext 8 - { - std::uint8_t len{}; - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, len) && - get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, len, result) && - assign_and_return_true(subtype); - } - - case 0xC8: // ext 16 - { - std::uint16_t len{}; - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, len) && - get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, len, result) && - assign_and_return_true(subtype); - } - - case 0xC9: // ext 32 - { - std::uint32_t len{}; - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, len) && - get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, len, 
result) && - assign_and_return_true(subtype); - } - - case 0xD4: // fixext 1 - { - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, 1, result) && - assign_and_return_true(subtype); - } - - case 0xD5: // fixext 2 - { - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, 2, result) && - assign_and_return_true(subtype); - } - - case 0xD6: // fixext 4 - { - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, 4, result) && - assign_and_return_true(subtype); - } - - case 0xD7: // fixext 8 - { - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, 8, result) && - assign_and_return_true(subtype); - } - - case 0xD8: // fixext 16 - { - std::int8_t subtype{}; - return get_number(input_format_t::msgpack, subtype) && - get_binary(input_format_t::msgpack, 16, result) && - assign_and_return_true(subtype); - } - - default: // LCOV_EXCL_LINE - return false; // LCOV_EXCL_LINE - } - } - - /*! - @param[in] len the length of the array - @return whether array creation completed - */ - bool get_msgpack_array(const std::size_t len) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len))) - { - return false; - } - - for (std::size_t i = 0; i < len; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal())) - { - return false; - } - } - - return sax->end_array(); - } - - /*! 
- @param[in] len the length of the object - @return whether object creation completed - */ - bool get_msgpack_object(const std::size_t len) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len))) - { - return false; - } - - string_t key; - for (std::size_t i = 0; i < len; ++i) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!get_msgpack_string(key) || !sax->key(key))) - { - return false; - } - - if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal())) - { - return false; - } - key.clear(); - } - - return sax->end_object(); - } - - //////////// - // UBJSON // - //////////// - - /*! - @param[in] get_char whether a new character should be retrieved from the - input (true, default) or whether the last read - character should be considered instead - - @return whether a valid UBJSON value was passed to the SAX parser - */ - bool parse_ubjson_internal(const bool get_char = true) - { - return get_ubjson_value(get_char ? get_ignore_noop() : current); - } - - /*! - @brief reads a UBJSON string - - This function is either called after reading the 'S' byte explicitly - indicating a string, or in case of an object key where the 'S' byte can be - left out. - - @param[out] result created string - @param[in] get_char whether a new character should be retrieved from the - input (true, default) or whether the last read - character should be considered instead - - @return whether string creation completed - */ - bool get_ubjson_string(string_t& result, const bool get_char = true) - { - if (get_char) - { - get(); // TODO(niels): may we ignore N here? 
- } - - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value"))) - { - return false; - } - - switch (current) - { - case 'U': - { - std::uint8_t len{}; - return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); - } - - case 'i': - { - std::int8_t len{}; - return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); - } - - case 'I': - { - std::int16_t len{}; - return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); - } - - case 'l': - { - std::int32_t len{}; - return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); - } - - case 'L': - { - std::int64_t len{}; - return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result); - } - - default: - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string"), BasicJsonType())); - } - } - - /*! 
- @param[out] result determined size - @return whether size determination completed - */ - bool get_ubjson_size_value(std::size_t& result) - { - switch (get_ignore_noop()) - { - case 'U': - { - std::uint8_t number{}; - if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'i': - { - std::int8_t number{}; - if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'I': - { - std::int16_t number{}; - if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'l': - { - std::int32_t number{}; - if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - case 'L': - { - std::int64_t number{}; - if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number))) - { - return false; - } - result = static_cast(number); - return true; - } - - default: - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size"), BasicJsonType())); - } - } - } - - /*! - @brief determine the type and size for a container - - In the optimized UBJSON format, a type and a size can be provided to allow - for a more compact representation. 
- - @param[out] result pair of the size and the type - - @return whether pair creation completed - */ - bool get_ubjson_size_type(std::pair& result) - { - result.first = string_t::npos; // size - result.second = 0; // type - - get_ignore_noop(); - - if (current == '$') - { - result.second = get(); // must not ignore 'N', because 'N' maybe the type - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "type"))) - { - return false; - } - - get_ignore_noop(); - if (JSON_HEDLEY_UNLIKELY(current != '#')) - { - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value"))) - { - return false; - } - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size"), BasicJsonType())); - } - - return get_ubjson_size_value(result.first); - } - - if (current == '#') - { - return get_ubjson_size_value(result.first); - } - - return true; - } - - /*! 
- @param prefix the previously read or set type prefix - @return whether value creation completed - */ - bool get_ubjson_value(const char_int_type prefix) - { - switch (prefix) - { - case std::char_traits::eof(): // EOF - return unexpect_eof(input_format_t::ubjson, "value"); - - case 'T': // true - return sax->boolean(true); - case 'F': // false - return sax->boolean(false); - - case 'Z': // null - return sax->null(); - - case 'U': - { - std::uint8_t number{}; - return get_number(input_format_t::ubjson, number) && sax->number_unsigned(number); - } - - case 'i': - { - std::int8_t number{}; - return get_number(input_format_t::ubjson, number) && sax->number_integer(number); - } - - case 'I': - { - std::int16_t number{}; - return get_number(input_format_t::ubjson, number) && sax->number_integer(number); - } - - case 'l': - { - std::int32_t number{}; - return get_number(input_format_t::ubjson, number) && sax->number_integer(number); - } - - case 'L': - { - std::int64_t number{}; - return get_number(input_format_t::ubjson, number) && sax->number_integer(number); - } - - case 'd': - { - float number{}; - return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast(number), ""); - } - - case 'D': - { - double number{}; - return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast(number), ""); - } - - case 'H': - { - return get_ubjson_high_precision_number(); - } - - case 'C': // char - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "char"))) - { - return false; - } - if (JSON_HEDLEY_UNLIKELY(current > 127)) - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char"), BasicJsonType())); - } - string_t s(1, static_cast(current)); - return sax->string(s); - } - - case 'S': // string - { - string_t s; - return 
get_ubjson_string(s) && sax->string(s); - } - - case '[': // array - return get_ubjson_array(); - - case '{': // object - return get_ubjson_object(); - - default: // anything else - { - auto last_token = get_token_string(); - return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value"), BasicJsonType())); - } - } - } - - /*! - @return whether array creation completed - */ - bool get_ubjson_array() - { - std::pair size_and_type; - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type))) - { - return false; - } - - if (size_and_type.first != string_t::npos) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first))) - { - return false; - } - - if (size_and_type.second != 0) - { - if (size_and_type.second != 'N') - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second))) - { - return false; - } - } - } - } - else - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) - { - return false; - } - } - } - } - else - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) - { - return false; - } - - while (current != ']') - { - if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal(false))) - { - return false; - } - get_ignore_noop(); - } - } - - return sax->end_array(); - } - - /*! 
- @return whether object creation completed - */ - bool get_ubjson_object() - { - std::pair size_and_type; - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type))) - { - return false; - } - - string_t key; - if (size_and_type.first != string_t::npos) - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(size_and_type.first))) - { - return false; - } - - if (size_and_type.second != 0) - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key))) - { - return false; - } - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second))) - { - return false; - } - key.clear(); - } - } - else - { - for (std::size_t i = 0; i < size_and_type.first; ++i) - { - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key))) - { - return false; - } - if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) - { - return false; - } - key.clear(); - } - } - } - else - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) - { - return false; - } - - while (current != '}') - { - if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key, false) || !sax->key(key))) - { - return false; - } - if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal())) - { - return false; - } - get_ignore_noop(); - key.clear(); - } - } - - return sax->end_object(); - } - - // Note, no reader for UBJSON binary types is implemented because they do - // not exist - - bool get_ubjson_high_precision_number() - { - // get size of following number string - std::size_t size{}; - auto res = get_ubjson_size_value(size); - if (JSON_HEDLEY_UNLIKELY(!res)) - { - return res; - } - - // get number string - std::vector number_vector; - for (std::size_t i = 0; i < size; ++i) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "number"))) - { - return false; - } - number_vector.push_back(static_cast(current)); - } - - // parse number string - auto number_ia = detail::input_adapter(std::forward(number_vector)); - auto 
number_lexer = detail::lexer(std::move(number_ia), false); - const auto result_number = number_lexer.scan(); - const auto number_string = number_lexer.get_token_string(); - const auto result_remainder = number_lexer.scan(); - - using token_type = typename detail::lexer_base::token_type; - - if (JSON_HEDLEY_UNLIKELY(result_remainder != token_type::end_of_input)) - { - return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number"), BasicJsonType())); - } - - switch (result_number) - { - case token_type::value_integer: - return sax->number_integer(number_lexer.get_number_integer()); - case token_type::value_unsigned: - return sax->number_unsigned(number_lexer.get_number_unsigned()); - case token_type::value_float: - return sax->number_float(number_lexer.get_number_float(), std::move(number_string)); - default: - return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number"), BasicJsonType())); - } - } - - /////////////////////// - // Utility functions // - /////////////////////// - - /*! - @brief get next character from the input - - This function provides the interface to the used input adapter. It does - not throw in case the input reached EOF, but returns a -'ve valued - `std::char_traits::eof()` in that case. - - @return character read from the input - */ - char_int_type get() - { - ++chars_read; - return current = ia.get_character(); - } - - /*! 
- @return character read from the input after ignoring all 'N' entries - */ - char_int_type get_ignore_noop() - { - do - { - get(); - } - while (current == 'N'); - - return current; - } - - /* - @brief read a number from the input - - @tparam NumberType the type of the number - @param[in] format the current format (for diagnostics) - @param[out] result number of type @a NumberType - - @return whether conversion completed - - @note This function needs to respect the system's endianess, because - bytes in CBOR, MessagePack, and UBJSON are stored in network order - (big endian) and therefore need reordering on little endian systems. - */ - template - bool get_number(const input_format_t format, NumberType& result) - { - // step 1: read input into array with system's byte order - std::array vec; - for (std::size_t i = 0; i < sizeof(NumberType); ++i) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number"))) - { - return false; - } - - // reverse byte order prior to conversion if necessary - if (is_little_endian != InputIsLittleEndian) - { - vec[sizeof(NumberType) - i - 1] = static_cast(current); - } - else - { - vec[i] = static_cast(current); // LCOV_EXCL_LINE - } - } - - // step 2: convert array into number of type T and return - std::memcpy(&result, vec.data(), sizeof(NumberType)); - return true; - } - - /*! - @brief create a string by reading characters from the input - - @tparam NumberType the type of the number - @param[in] format the current format (for diagnostics) - @param[in] len number of characters to read - @param[out] result string created by reading @a len bytes - - @return whether string creation completed - - @note We can not reserve @a len bytes for the result, because @a len - may be too large. Usually, @ref unexpect_eof() detects the end of - the input before we run out of string memory. 
- */ - template - bool get_string(const input_format_t format, - const NumberType len, - string_t& result) - { - bool success = true; - for (NumberType i = 0; i < len; i++) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "string"))) - { - success = false; - break; - } - result.push_back(static_cast(current)); - } - return success; - } - - /*! - @brief create a byte array by reading bytes from the input - - @tparam NumberType the type of the number - @param[in] format the current format (for diagnostics) - @param[in] len number of bytes to read - @param[out] result byte array created by reading @a len bytes - - @return whether byte array creation completed - - @note We can not reserve @a len bytes for the result, because @a len - may be too large. Usually, @ref unexpect_eof() detects the end of - the input before we run out of memory. - */ - template - bool get_binary(const input_format_t format, - const NumberType len, - binary_t& result) - { - bool success = true; - for (NumberType i = 0; i < len; i++) - { - get(); - if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "binary"))) - { - success = false; - break; - } - result.push_back(static_cast(current)); - } - return success; - } - - /*! - @param[in] format the current format (for diagnostics) - @param[in] context further context information (for diagnostics) - @return whether the last read character is not EOF - */ - JSON_HEDLEY_NON_NULL(3) - bool unexpect_eof(const input_format_t format, const char* context) const - { - if (JSON_HEDLEY_UNLIKELY(current == std::char_traits::eof())) - { - return sax->parse_error(chars_read, "", - parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context), BasicJsonType())); - } - return true; - } - - /*! 
- @return a string representation of the last read byte - */ - std::string get_token_string() const - { - std::array cr{{}}; - (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast(current)); - return std::string{cr.data()}; - } - - /*! - @param[in] format the current format - @param[in] detail a detailed error message - @param[in] context further context information - @return a message string to use in the parse_error exceptions - */ - std::string exception_message(const input_format_t format, - const std::string& detail, - const std::string& context) const - { - std::string error_msg = "syntax error while parsing "; - - switch (format) - { - case input_format_t::cbor: - error_msg += "CBOR"; - break; - - case input_format_t::msgpack: - error_msg += "MessagePack"; - break; - - case input_format_t::ubjson: - error_msg += "UBJSON"; - break; - - case input_format_t::bson: - error_msg += "BSON"; - break; - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - - return error_msg + " " + context + ": " + detail; - } - - private: - /// input adapter - InputAdapterType ia; - - /// the current character - char_int_type current = std::char_traits::eof(); - - /// the number of characters read - std::size_t chars_read = 0; - - /// whether we can assume little endianess - const bool is_little_endian = little_endianess(); - - /// the SAX parser - json_sax_t* sax = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - - -#include // isfinite -#include // uint8_t -#include // function -#include // string -#include // move -#include // vector - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -//////////// -// parser // -//////////// - -enum class parse_event_t : uint8_t -{ - /// the parser read `{` and started to process a JSON object - object_start, - /// the parser read `}` and finished processing a 
JSON object - object_end, - /// the parser read `[` and started to process a JSON array - array_start, - /// the parser read `]` and finished processing a JSON array - array_end, - /// the parser read a key of a value in an object - key, - /// the parser finished reading a JSON value - value -}; - -template -using parser_callback_t = - std::function; - -/*! -@brief syntax analysis - -This class implements a recursive descent parser. -*/ -template -class parser -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using lexer_t = lexer; - using token_type = typename lexer_t::token_type; - - public: - /// a parser reading from an input adapter - explicit parser(InputAdapterType&& adapter, - const parser_callback_t cb = nullptr, - const bool allow_exceptions_ = true, - const bool skip_comments = false) - : callback(cb) - , m_lexer(std::move(adapter), skip_comments) - , allow_exceptions(allow_exceptions_) - { - // read first token - get_token(); - } - - /*! 
- @brief public parser interface - - @param[in] strict whether to expect the last token to be EOF - @param[in,out] result parsed JSON value - - @throw parse_error.101 in case of an unexpected token - @throw parse_error.102 if to_unicode fails or surrogate error - @throw parse_error.103 if to_unicode fails - */ - void parse(const bool strict, BasicJsonType& result) - { - if (callback) - { - json_sax_dom_callback_parser sdp(result, callback, allow_exceptions); - sax_parse_internal(&sdp); - - // in strict mode, input must be completely read - if (strict && (get_token() != token_type::end_of_input)) - { - sdp.parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), - exception_message(token_type::end_of_input, "value"), BasicJsonType())); - } - - // in case of an error, return discarded value - if (sdp.is_errored()) - { - result = value_t::discarded; - return; - } - - // set top-level value to null if it was discarded by the callback - // function - if (result.is_discarded()) - { - result = nullptr; - } - } - else - { - json_sax_dom_parser sdp(result, allow_exceptions); - sax_parse_internal(&sdp); - - // in strict mode, input must be completely read - if (strict && (get_token() != token_type::end_of_input)) - { - sdp.parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), BasicJsonType())); - } - - // in case of an error, return discarded value - if (sdp.is_errored()) - { - result = value_t::discarded; - return; - } - } - - result.assert_invariant(); - } - - /*! 
- @brief public accept interface - - @param[in] strict whether to expect the last token to be EOF - @return whether the input is a proper JSON text - */ - bool accept(const bool strict = true) - { - json_sax_acceptor sax_acceptor; - return sax_parse(&sax_acceptor, strict); - } - - template - JSON_HEDLEY_NON_NULL(2) - bool sax_parse(SAX* sax, const bool strict = true) - { - (void)detail::is_sax_static_asserts {}; - const bool result = sax_parse_internal(sax); - - // strict mode: next byte must be EOF - if (result && strict && (get_token() != token_type::end_of_input)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_of_input, "value"), BasicJsonType())); - } - - return result; - } - - private: - template - JSON_HEDLEY_NON_NULL(2) - bool sax_parse_internal(SAX* sax) - { - // stack to remember the hierarchy of structured values we are parsing - // true = array; false = object - std::vector states; - // value to avoid a goto (see comment where set to true) - bool skip_to_state_evaluation = false; - - while (true) - { - if (!skip_to_state_evaluation) - { - // invariant: get_token() was called before each iteration - switch (last_token) - { - case token_type::begin_object: - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1)))) - { - return false; - } - - // closing } -> we are done - if (get_token() == token_type::end_object) - { - if (JSON_HEDLEY_UNLIKELY(!sax->end_object())) - { - return false; - } - break; - } - - // parse key - if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), BasicJsonType())); - } - if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string()))) - { - return false; - } - - // parse separator (:) - if 
(JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), BasicJsonType())); - } - - // remember we are now inside an object - states.push_back(false); - - // parse values - get_token(); - continue; - } - - case token_type::begin_array: - { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1)))) - { - return false; - } - - // closing ] -> we are done - if (get_token() == token_type::end_array) - { - if (JSON_HEDLEY_UNLIKELY(!sax->end_array())) - { - return false; - } - break; - } - - // remember we are now inside an array - states.push_back(true); - - // parse values (no need to call get_token) - continue; - } - - case token_type::value_float: - { - const auto res = m_lexer.get_number_float(); - - if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res))) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'", BasicJsonType())); - } - - if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string()))) - { - return false; - } - - break; - } - - case token_type::literal_false: - { - if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false))) - { - return false; - } - break; - } - - case token_type::literal_null: - { - if (JSON_HEDLEY_UNLIKELY(!sax->null())) - { - return false; - } - break; - } - - case token_type::literal_true: - { - if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true))) - { - return false; - } - break; - } - - case token_type::value_integer: - { - if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer()))) - { - return false; - } - break; - } - - case token_type::value_string: - { - if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string()))) - { - return false; - } - break; - } - - case token_type::value_unsigned: - { - if 
(JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned()))) - { - return false; - } - break; - } - - case token_type::parse_error: - { - // using "uninitialized" to avoid "expected" message - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::uninitialized, "value"), BasicJsonType())); - } - - default: // the last token was unexpected - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::literal_or_value, "value"), BasicJsonType())); - } - } - } - else - { - skip_to_state_evaluation = false; - } - - // we reached this line after we successfully parsed a value - if (states.empty()) - { - // empty stack: we reached the end of the hierarchy: done - return true; - } - - if (states.back()) // array - { - // comma -> next value - if (get_token() == token_type::value_separator) - { - // parse a new value - get_token(); - continue; - } - - // closing ] - if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array)) - { - if (JSON_HEDLEY_UNLIKELY(!sax->end_array())) - { - return false; - } - - // We are done with this array. Before we can parse a - // new value, we need to evaluate the new state first. - // By setting skip_to_state_evaluation to false, we - // are effectively jumping to the beginning of this if. 
- JSON_ASSERT(!states.empty()); - states.pop_back(); - skip_to_state_evaluation = true; - continue; - } - - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_array, "array"), BasicJsonType())); - } - - // states.back() is false -> object - - // comma -> next value - if (get_token() == token_type::value_separator) - { - // parse key - if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::value_string, "object key"), BasicJsonType())); - } - - if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string()))) - { - return false; - } - - // parse separator (:) - if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator)) - { - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::name_separator, "object separator"), BasicJsonType())); - } - - // parse values - get_token(); - continue; - } - - // closing } - if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object)) - { - if (JSON_HEDLEY_UNLIKELY(!sax->end_object())) - { - return false; - } - - // We are done with this object. Before we can parse a - // new value, we need to evaluate the new state first. - // By setting skip_to_state_evaluation to false, we - // are effectively jumping to the beginning of this if. 
- JSON_ASSERT(!states.empty()); - states.pop_back(); - skip_to_state_evaluation = true; - continue; - } - - return sax->parse_error(m_lexer.get_position(), - m_lexer.get_token_string(), - parse_error::create(101, m_lexer.get_position(), exception_message(token_type::end_object, "object"), BasicJsonType())); - } - } - - /// get next token from lexer - token_type get_token() - { - return last_token = m_lexer.scan(); - } - - std::string exception_message(const token_type expected, const std::string& context) - { - std::string error_msg = "syntax error "; - - if (!context.empty()) - { - error_msg += "while parsing " + context + " "; - } - - error_msg += "- "; - - if (last_token == token_type::parse_error) - { - error_msg += std::string(m_lexer.get_error_message()) + "; last read: '" + - m_lexer.get_token_string() + "'"; - } - else - { - error_msg += "unexpected " + std::string(lexer_t::token_type_name(last_token)); - } - - if (expected != token_type::uninitialized) - { - error_msg += "; expected " + std::string(lexer_t::token_type_name(expected)); - } - - return error_msg; - } - - private: - /// callback function - const parser_callback_t callback = nullptr; - /// the type of the last read token - token_type last_token = token_type::uninitialized; - /// the lexer - lexer_t m_lexer; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -// #include - - -#include // ptrdiff_t -#include // numeric_limits - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/* -@brief an iterator for primitive JSON types - -This class models an iterator for primitive JSON types (boolean, number, -string). It's only purpose is to allow the iterator/const_iterator classes -to "iterate" over primitive values. Internally, the iterator is modeled by -a `difference_type` variable. Value begin_value (`0`) models the begin, -end_value (`1`) models past the end. 
-*/ -class primitive_iterator_t -{ - private: - using difference_type = std::ptrdiff_t; - static constexpr difference_type begin_value = 0; - static constexpr difference_type end_value = begin_value + 1; - - JSON_PRIVATE_UNLESS_TESTED: - /// iterator as signed integer type - difference_type m_it = (std::numeric_limits::min)(); - - public: - constexpr difference_type get_value() const noexcept - { - return m_it; - } - - /// set iterator to a defined beginning - void set_begin() noexcept - { - m_it = begin_value; - } - - /// set iterator to a defined past the end - void set_end() noexcept - { - m_it = end_value; - } - - /// return whether the iterator can be dereferenced - constexpr bool is_begin() const noexcept - { - return m_it == begin_value; - } - - /// return whether the iterator is at end - constexpr bool is_end() const noexcept - { - return m_it == end_value; - } - - friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept - { - return lhs.m_it == rhs.m_it; - } - - friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept - { - return lhs.m_it < rhs.m_it; - } - - primitive_iterator_t operator+(difference_type n) noexcept - { - auto result = *this; - result += n; - return result; - } - - friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept - { - return lhs.m_it - rhs.m_it; - } - - primitive_iterator_t& operator++() noexcept - { - ++m_it; - return *this; - } - - primitive_iterator_t const operator++(int) noexcept - { - auto result = *this; - ++m_it; - return result; - } - - primitive_iterator_t& operator--() noexcept - { - --m_it; - return *this; - } - - primitive_iterator_t const operator--(int) noexcept - { - auto result = *this; - --m_it; - return result; - } - - primitive_iterator_t& operator+=(difference_type n) noexcept - { - m_it += n; - return *this; - } - - primitive_iterator_t& operator-=(difference_type n) noexcept - { - m_it -= 
n; - return *this; - } -}; -} // namespace detail -} // namespace nlohmann - - -namespace nlohmann -{ -namespace detail -{ -/*! -@brief an iterator value - -@note This structure could easily be a union, but MSVC currently does not allow -unions members with complex constructors, see https://github.com/nlohmann/json/pull/105. -*/ -template struct internal_iterator -{ - /// iterator for JSON objects - typename BasicJsonType::object_t::iterator object_iterator {}; - /// iterator for JSON arrays - typename BasicJsonType::array_t::iterator array_iterator {}; - /// generic iterator for all other types - primitive_iterator_t primitive_iterator {}; -}; -} // namespace detail -} // namespace nlohmann - -// #include - - -#include // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next -#include // conditional, is_const, remove_const - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -// forward declare, to be able to friend it later on -template class iteration_proxy; -template class iteration_proxy_value; - -/*! -@brief a template for a bidirectional iterator for the @ref basic_json class -This class implements a both iterators (iterator and const_iterator) for the -@ref basic_json class. -@note An iterator is called *initialized* when a pointer to a JSON value has - been set (e.g., by a constructor or a copy assignment). If the iterator is - default-constructed, it is *uninitialized* and most methods are undefined. - **The library uses assertions to detect calls on uninitialized iterators.** -@requirement The class satisfies the following concept requirements: -- -[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator): - The iterator that can be moved can be moved in both directions (i.e. - incremented and decremented). 
-@since version 1.0.0, simplified in version 2.0.9, change to bidirectional - iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593) -*/ -template -class iter_impl -{ - /// the iterator with BasicJsonType of different const-ness - using other_iter_impl = iter_impl::value, typename std::remove_const::type, const BasicJsonType>::type>; - /// allow basic_json to access private members - friend other_iter_impl; - friend BasicJsonType; - friend iteration_proxy; - friend iteration_proxy_value; - - using object_t = typename BasicJsonType::object_t; - using array_t = typename BasicJsonType::array_t; - // make sure BasicJsonType is basic_json or const basic_json - static_assert(is_basic_json::type>::value, - "iter_impl only accepts (const) basic_json"); - - public: - - /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17. - /// The C++ Standard has never required user-defined iterators to derive from std::iterator. - /// A user-defined iterator should provide publicly accessible typedefs named - /// iterator_category, value_type, difference_type, pointer, and reference. - /// Note that value_type is required to be non-const, even for constant iterators. 
- using iterator_category = std::bidirectional_iterator_tag; - - /// the type of the values when the iterator is dereferenced - using value_type = typename BasicJsonType::value_type; - /// a type to represent differences between iterators - using difference_type = typename BasicJsonType::difference_type; - /// defines a pointer to the type iterated over (value_type) - using pointer = typename std::conditional::value, - typename BasicJsonType::const_pointer, - typename BasicJsonType::pointer>::type; - /// defines a reference to the type iterated over (value_type) - using reference = - typename std::conditional::value, - typename BasicJsonType::const_reference, - typename BasicJsonType::reference>::type; - - /// default constructor - iter_impl() = default; - - /*! - @brief constructor for a given JSON instance - @param[in] object pointer to a JSON object for this iterator - @pre object != nullptr - @post The iterator is initialized; i.e. `m_object != nullptr`. - */ - explicit iter_impl(pointer object) noexcept : m_object(object) - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - m_it.object_iterator = typename object_t::iterator(); - break; - } - - case value_t::array: - { - m_it.array_iterator = typename array_t::iterator(); - break; - } - - default: - { - m_it.primitive_iterator = primitive_iterator_t(); - break; - } - } - } - - /*! - @note The conventional copy constructor and copy assignment are implicitly - defined. Combined with the following converting constructor and - assignment, they support: (1) copy from iterator to iterator, (2) - copy from const iterator to const iterator, and (3) conversion from - iterator to const iterator. However conversion from const iterator - to iterator is not defined. - */ - - /*! 
- @brief const copy constructor - @param[in] other const iterator to copy from - @note This copy constructor had to be defined explicitly to circumvent a bug - occurring on msvc v19.0 compiler (VS 2015) debug build. For more - information refer to: https://github.com/nlohmann/json/issues/1608 - */ - iter_impl(const iter_impl& other) noexcept - : m_object(other.m_object), m_it(other.m_it) - {} - - /*! - @brief converting assignment - @param[in] other const iterator to copy from - @return const/non-const iterator - @note It is not checked whether @a other is initialized. - */ - iter_impl& operator=(const iter_impl& other) noexcept - { - m_object = other.m_object; - m_it = other.m_it; - return *this; - } - - /*! - @brief converting constructor - @param[in] other non-const iterator to copy from - @note It is not checked whether @a other is initialized. - */ - iter_impl(const iter_impl::type>& other) noexcept - : m_object(other.m_object), m_it(other.m_it) - {} - - /*! - @brief converting assignment - @param[in] other non-const iterator to copy from - @return const/non-const iterator - @note It is not checked whether @a other is initialized. - */ - iter_impl& operator=(const iter_impl::type>& other) noexcept - { - m_object = other.m_object; - m_it = other.m_it; - return *this; - } - - JSON_PRIVATE_UNLESS_TESTED: - /*! - @brief set the iterator to the first value - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - void set_begin() noexcept - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - m_it.object_iterator = m_object->m_value.object->begin(); - break; - } - - case value_t::array: - { - m_it.array_iterator = m_object->m_value.array->begin(); - break; - } - - case value_t::null: - { - // set to end so begin()==end() is true: null is empty - m_it.primitive_iterator.set_end(); - break; - } - - default: - { - m_it.primitive_iterator.set_begin(); - break; - } - } - } - - /*! 
- @brief set the iterator past the last value - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - void set_end() noexcept - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - m_it.object_iterator = m_object->m_value.object->end(); - break; - } - - case value_t::array: - { - m_it.array_iterator = m_object->m_value.array->end(); - break; - } - - default: - { - m_it.primitive_iterator.set_end(); - break; - } - } - } - - public: - /*! - @brief return a reference to the value pointed to by the iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - reference operator*() const - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end()); - return m_it.object_iterator->second; - } - - case value_t::array: - { - JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end()); - return *m_it.array_iterator; - } - - case value_t::null: - JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); - - default: - { - if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin())) - { - return *m_object; - } - - JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); - } - } - } - - /*! - @brief dereference the iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. 
- */ - pointer operator->() const - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end()); - return &(m_it.object_iterator->second); - } - - case value_t::array: - { - JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end()); - return &*m_it.array_iterator; - } - - default: - { - if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin())) - { - return m_object; - } - - JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); - } - } - } - - /*! - @brief post-increment (it++) - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl const operator++(int) - { - auto result = *this; - ++(*this); - return result; - } - - /*! - @brief pre-increment (++it) - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl& operator++() - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - std::advance(m_it.object_iterator, 1); - break; - } - - case value_t::array: - { - std::advance(m_it.array_iterator, 1); - break; - } - - default: - { - ++m_it.primitive_iterator; - break; - } - } - - return *this; - } - - /*! - @brief post-decrement (it--) - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl const operator--(int) - { - auto result = *this; - --(*this); - return result; - } - - /*! - @brief pre-decrement (--it) - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl& operator--() - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - { - std::advance(m_it.object_iterator, -1); - break; - } - - case value_t::array: - { - std::advance(m_it.array_iterator, -1); - break; - } - - default: - { - --m_it.primitive_iterator; - break; - } - } - - return *this; - } - - /*! - @brief comparison: equal - @pre The iterator is initialized; i.e. 
`m_object != nullptr`. - */ - template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > - bool operator==(const IterImpl& other) const - { - // if objects are not the same, the comparison is undefined - if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object)) - { - JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", *m_object)); - } - - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - return (m_it.object_iterator == other.m_it.object_iterator); - - case value_t::array: - return (m_it.array_iterator == other.m_it.array_iterator); - - default: - return (m_it.primitive_iterator == other.m_it.primitive_iterator); - } - } - - /*! - @brief comparison: not equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > - bool operator!=(const IterImpl& other) const - { - return !operator==(other); - } - - /*! - @brief comparison: smaller - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - bool operator<(const iter_impl& other) const - { - // if objects are not the same, the comparison is undefined - if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object)) - { - JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", *m_object)); - } - - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators", *m_object)); - - case value_t::array: - return (m_it.array_iterator < other.m_it.array_iterator); - - default: - return (m_it.primitive_iterator < other.m_it.primitive_iterator); - } - } - - /*! - @brief comparison: less than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. 
- */ - bool operator<=(const iter_impl& other) const - { - return !other.operator < (*this); - } - - /*! - @brief comparison: greater than - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - bool operator>(const iter_impl& other) const - { - return !operator<=(other); - } - - /*! - @brief comparison: greater than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - bool operator>=(const iter_impl& other) const - { - return !operator<(other); - } - - /*! - @brief add to iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl& operator+=(difference_type i) - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", *m_object)); - - case value_t::array: - { - std::advance(m_it.array_iterator, i); - break; - } - - default: - { - m_it.primitive_iterator += i; - break; - } - } - - return *this; - } - - /*! - @brief subtract from iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl& operator-=(difference_type i) - { - return operator+=(-i); - } - - /*! - @brief add to iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl operator+(difference_type i) const - { - auto result = *this; - result += i; - return result; - } - - /*! - @brief addition of distance and iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - friend iter_impl operator+(difference_type i, const iter_impl& it) - { - auto result = it; - result += i; - return result; - } - - /*! - @brief subtract from iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - iter_impl operator-(difference_type i) const - { - auto result = *this; - result -= i; - return result; - } - - /*! - @brief return difference - @pre The iterator is initialized; i.e. `m_object != nullptr`. 
- */ - difference_type operator-(const iter_impl& other) const - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators", *m_object)); - - case value_t::array: - return m_it.array_iterator - other.m_it.array_iterator; - - default: - return m_it.primitive_iterator - other.m_it.primitive_iterator; - } - } - - /*! - @brief access to successor - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - reference operator[](difference_type n) const - { - JSON_ASSERT(m_object != nullptr); - - switch (m_object->m_type) - { - case value_t::object: - JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators", *m_object)); - - case value_t::array: - return *std::next(m_it.array_iterator, n); - - case value_t::null: - JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); - - default: - { - if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n)) - { - return *m_object; - } - - JSON_THROW(invalid_iterator::create(214, "cannot get value", *m_object)); - } - } - } - - /*! - @brief return the key of an object iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. - */ - const typename object_t::key_type& key() const - { - JSON_ASSERT(m_object != nullptr); - - if (JSON_HEDLEY_LIKELY(m_object->is_object())) - { - return m_it.object_iterator->first; - } - - JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators", *m_object)); - } - - /*! - @brief return the value of an iterator - @pre The iterator is initialized; i.e. `m_object != nullptr`. 
- */ - reference value() const - { - return operator*(); - } - - JSON_PRIVATE_UNLESS_TESTED: - /// associated JSON instance - pointer m_object = nullptr; - /// the actual iterator of the associated instance - internal_iterator::type> m_it {}; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - - -#include // ptrdiff_t -#include // reverse_iterator -#include // declval - -namespace nlohmann -{ -namespace detail -{ -////////////////////// -// reverse_iterator // -////////////////////// - -/*! -@brief a template for a reverse iterator class - -@tparam Base the base iterator type to reverse. Valid types are @ref -iterator (to create @ref reverse_iterator) and @ref const_iterator (to -create @ref const_reverse_iterator). - -@requirement The class satisfies the following concept requirements: -- -[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator): - The iterator that can be moved can be moved in both directions (i.e. - incremented and decremented). -- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator): - It is possible to write to the pointed-to element (only if @a Base is - @ref iterator). 
- -@since version 1.0.0 -*/ -template -class json_reverse_iterator : public std::reverse_iterator -{ - public: - using difference_type = std::ptrdiff_t; - /// shortcut to the reverse iterator adapter - using base_iterator = std::reverse_iterator; - /// the reference type for the pointed-to element - using reference = typename Base::reference; - - /// create reverse iterator from iterator - explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept - : base_iterator(it) {} - - /// create reverse iterator from base class - explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {} - - /// post-increment (it++) - json_reverse_iterator const operator++(int) - { - return static_cast(base_iterator::operator++(1)); - } - - /// pre-increment (++it) - json_reverse_iterator& operator++() - { - return static_cast(base_iterator::operator++()); - } - - /// post-decrement (it--) - json_reverse_iterator const operator--(int) - { - return static_cast(base_iterator::operator--(1)); - } - - /// pre-decrement (--it) - json_reverse_iterator& operator--() - { - return static_cast(base_iterator::operator--()); - } - - /// add to iterator - json_reverse_iterator& operator+=(difference_type i) - { - return static_cast(base_iterator::operator+=(i)); - } - - /// add to iterator - json_reverse_iterator operator+(difference_type i) const - { - return static_cast(base_iterator::operator+(i)); - } - - /// subtract from iterator - json_reverse_iterator operator-(difference_type i) const - { - return static_cast(base_iterator::operator-(i)); - } - - /// return difference - difference_type operator-(const json_reverse_iterator& other) const - { - return base_iterator(*this) - base_iterator(other); - } - - /// access to successor - reference operator[](difference_type n) const - { - return *(this->operator+(n)); - } - - /// return the key of an object iterator - auto key() const -> decltype(std::declval().key()) - { - auto it = 
--this->base(); - return it.key(); - } - - /// return the value of an iterator - reference value() const - { - auto it = --this->base(); - return it.operator * (); - } -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - - -#include // all_of -#include // isdigit -#include // max -#include // accumulate -#include // string -#include // move -#include // vector - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -template -class json_pointer -{ - // allow basic_json to access private members - NLOHMANN_BASIC_JSON_TPL_DECLARATION - friend class basic_json; - - public: - /*! - @brief create JSON pointer - - Create a JSON pointer according to the syntax described in - [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3). - - @param[in] s string representing the JSON pointer; if omitted, the empty - string is assumed which references the whole JSON value - - @throw parse_error.107 if the given JSON pointer @a s is nonempty and does - not begin with a slash (`/`); see example below - - @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is - not followed by `0` (representing `~`) or `1` (representing `/`); see - example below - - @liveexample{The example shows the construction several valid JSON pointers - as well as the exceptional behavior.,json_pointer} - - @since version 2.0.0 - */ - explicit json_pointer(const std::string& s = "") - : reference_tokens(split(s)) - {} - - /*! 
- @brief return a string representation of the JSON pointer - - @invariant For each JSON pointer `ptr`, it holds: - @code {.cpp} - ptr == json_pointer(ptr.to_string()); - @endcode - - @return a string representation of the JSON pointer - - @liveexample{The example shows the result of `to_string`.,json_pointer__to_string} - - @since version 2.0.0 - */ - std::string to_string() const - { - return std::accumulate(reference_tokens.begin(), reference_tokens.end(), - std::string{}, - [](const std::string & a, const std::string & b) - { - return a + "/" + detail::escape(b); - }); - } - - /// @copydoc to_string() - operator std::string() const - { - return to_string(); - } - - /*! - @brief append another JSON pointer at the end of this JSON pointer - - @param[in] ptr JSON pointer to append - @return JSON pointer with @a ptr appended - - @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} - - @complexity Linear in the length of @a ptr. - - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator - - @since version 3.6.0 - */ - json_pointer& operator/=(const json_pointer& ptr) - { - reference_tokens.insert(reference_tokens.end(), - ptr.reference_tokens.begin(), - ptr.reference_tokens.end()); - return *this; - } - - /*! - @brief append an unescaped reference token at the end of this JSON pointer - - @param[in] token reference token to append - @return JSON pointer with @a token appended without escaping @a token - - @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} - - @complexity Amortized constant. 
- - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::size_t) to append an array index - @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator - - @since version 3.6.0 - */ - json_pointer& operator/=(std::string token) - { - push_back(std::move(token)); - return *this; - } - - /*! - @brief append an array index at the end of this JSON pointer - - @param[in] array_idx array index to append - @return JSON pointer with @a array_idx appended - - @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add} - - @complexity Amortized constant. - - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - @sa @ref operator/=(std::string) to append a reference token - @sa @ref operator/(const json_pointer&, std::string) for a binary operator - - @since version 3.6.0 - */ - json_pointer& operator/=(std::size_t array_idx) - { - return *this /= std::to_string(array_idx); - } - - /*! - @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer - - @param[in] lhs JSON pointer - @param[in] rhs JSON pointer - @return a new JSON pointer with @a rhs appended to @a lhs - - @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} - - @complexity Linear in the length of @a lhs and @a rhs. - - @sa @ref operator/=(const json_pointer&) to append a JSON pointer - - @since version 3.6.0 - */ - friend json_pointer operator/(const json_pointer& lhs, - const json_pointer& rhs) - { - return json_pointer(lhs) /= rhs; - } - - /*! - @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer - - @param[in] ptr JSON pointer - @param[in] token reference token - @return a new JSON pointer with unescaped @a token appended to @a ptr - - @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} - - @complexity Linear in the length of @a ptr. 
- - @sa @ref operator/=(std::string) to append a reference token - - @since version 3.6.0 - */ - friend json_pointer operator/(const json_pointer& ptr, std::string token) - { - return json_pointer(ptr) /= std::move(token); - } - - /*! - @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer - - @param[in] ptr JSON pointer - @param[in] array_idx array index - @return a new JSON pointer with @a array_idx appended to @a ptr - - @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary} - - @complexity Linear in the length of @a ptr. - - @sa @ref operator/=(std::size_t) to append an array index - - @since version 3.6.0 - */ - friend json_pointer operator/(const json_pointer& ptr, std::size_t array_idx) - { - return json_pointer(ptr) /= array_idx; - } - - /*! - @brief returns the parent of this JSON pointer - - @return parent of this JSON pointer; in case this JSON pointer is the root, - the root itself is returned - - @complexity Linear in the length of the JSON pointer. - - @liveexample{The example shows the result of `parent_pointer` for different - JSON Pointers.,json_pointer__parent_pointer} - - @since version 3.6.0 - */ - json_pointer parent_pointer() const - { - if (empty()) - { - return *this; - } - - json_pointer res = *this; - res.pop_back(); - return res; - } - - /*! - @brief remove last reference token - - @pre not `empty()` - - @liveexample{The example shows the usage of `pop_back`.,json_pointer__pop_back} - - @complexity Constant. - - @throw out_of_range.405 if JSON pointer has no parent - - @since version 3.6.0 - */ - void pop_back() - { - if (JSON_HEDLEY_UNLIKELY(empty())) - { - JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", BasicJsonType())); - } - - reference_tokens.pop_back(); - } - - /*! 
- @brief return last reference token - - @pre not `empty()` - @return last reference token - - @liveexample{The example shows the usage of `back`.,json_pointer__back} - - @complexity Constant. - - @throw out_of_range.405 if JSON pointer has no parent - - @since version 3.6.0 - */ - const std::string& back() const - { - if (JSON_HEDLEY_UNLIKELY(empty())) - { - JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent", BasicJsonType())); - } - - return reference_tokens.back(); - } - - /*! - @brief append an unescaped token at the end of the reference pointer - - @param[in] token token to add - - @complexity Amortized constant. - - @liveexample{The example shows the result of `push_back` for different - JSON Pointers.,json_pointer__push_back} - - @since version 3.6.0 - */ - void push_back(const std::string& token) - { - reference_tokens.push_back(token); - } - - /// @copydoc push_back(const std::string&) - void push_back(std::string&& token) - { - reference_tokens.push_back(std::move(token)); - } - - /*! - @brief return whether pointer points to the root document - - @return true iff the JSON pointer points to the root document - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example shows the result of `empty` for different JSON - Pointers.,json_pointer__empty} - - @since version 3.6.0 - */ - bool empty() const noexcept - { - return reference_tokens.empty(); - } - - private: - /*! 
- @param[in] s reference token to be converted into an array index - - @return integer representation of @a s - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index begins not with a digit - @throw out_of_range.404 if string @a s could not be converted to an integer - @throw out_of_range.410 if an array index exceeds size_type - */ - static typename BasicJsonType::size_type array_index(const std::string& s) - { - using size_type = typename BasicJsonType::size_type; - - // error condition (cf. RFC 6901, Sect. 4) - if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && s[0] == '0')) - { - JSON_THROW(detail::parse_error::create(106, 0, "array index '" + s + "' must not begin with '0'", BasicJsonType())); - } - - // error condition (cf. RFC 6901, Sect. 4) - if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && !(s[0] >= '1' && s[0] <= '9'))) - { - JSON_THROW(detail::parse_error::create(109, 0, "array index '" + s + "' is not a number", BasicJsonType())); - } - - std::size_t processed_chars = 0; - unsigned long long res = 0; - JSON_TRY - { - res = std::stoull(s, &processed_chars); - } - JSON_CATCH(std::out_of_range&) - { - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'", BasicJsonType())); - } - - // check if the string was completely read - if (JSON_HEDLEY_UNLIKELY(processed_chars != s.size())) - { - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'", BasicJsonType())); - } - - // only triggered on special platforms (like 32bit), see also - // https://github.com/nlohmann/json/pull/2203 - if (res >= static_cast((std::numeric_limits::max)())) - { - JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type", BasicJsonType())); // LCOV_EXCL_LINE - } - - return static_cast(res); - } - - JSON_PRIVATE_UNLESS_TESTED: - json_pointer top() const - { - if (JSON_HEDLEY_UNLIKELY(empty())) - { - JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no 
parent", BasicJsonType())); - } - - json_pointer result = *this; - result.reference_tokens = {reference_tokens[0]}; - return result; - } - - private: - /*! - @brief create and return a reference to the pointed to value - - @complexity Linear in the number of reference tokens. - - @throw parse_error.109 if array index is not a number - @throw type_error.313 if value cannot be unflattened - */ - BasicJsonType& get_and_create(BasicJsonType& j) const - { - auto result = &j; - - // in case no reference tokens exist, return a reference to the JSON value - // j which will be overwritten by a primitive value - for (const auto& reference_token : reference_tokens) - { - switch (result->type()) - { - case detail::value_t::null: - { - if (reference_token == "0") - { - // start a new array if reference token is 0 - result = &result->operator[](0); - } - else - { - // start a new object otherwise - result = &result->operator[](reference_token); - } - break; - } - - case detail::value_t::object: - { - // create an entry in the object - result = &result->operator[](reference_token); - break; - } - - case detail::value_t::array: - { - // create an entry in the array - result = &result->operator[](array_index(reference_token)); - break; - } - - /* - The following code is only reached if there exists a reference - token _and_ the current value is primitive. In this case, we have - an error situation, because primitive values may only occur as - single value; that is, with an empty list of reference tokens. - */ - default: - JSON_THROW(detail::type_error::create(313, "invalid value to unflatten", j)); - } - } - - return *result; - } - - /*! - @brief return a reference to the pointed to value - - @note This version does not throw if a value is not present, but tries to - create nested values instead. 
For instance, calling this function - with pointer `"/this/that"` on a null value is equivalent to calling - `operator[]("this").operator[]("that")` on that value, effectively - changing the null value to an object. - - @param[in] ptr a JSON value - - @return reference to the JSON value pointed to by the JSON pointer - - @complexity Linear in the length of the JSON pointer. - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.404 if the JSON pointer can not be resolved - */ - BasicJsonType& get_unchecked(BasicJsonType* ptr) const - { - for (const auto& reference_token : reference_tokens) - { - // convert null values to arrays or objects before continuing - if (ptr->is_null()) - { - // check if reference token is a number - const bool nums = - std::all_of(reference_token.begin(), reference_token.end(), - [](const unsigned char x) - { - return std::isdigit(x); - }); - - // change value to array for numbers or "-" or to object otherwise - *ptr = (nums || reference_token == "-") - ? detail::value_t::array - : detail::value_t::object; - } - - switch (ptr->type()) - { - case detail::value_t::object: - { - // use unchecked object access - ptr = &ptr->operator[](reference_token); - break; - } - - case detail::value_t::array: - { - if (reference_token == "-") - { - // explicitly treat "-" as index beyond the end - ptr = &ptr->operator[](ptr->m_value.array->size()); - } - else - { - // convert array index to number; unchecked access - ptr = &ptr->operator[](array_index(reference_token)); - } - break; - } - - default: - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); - } - } - - return *ptr; - } - - /*! 
- @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.402 if the array index '-' is used - @throw out_of_range.404 if the JSON pointer can not be resolved - */ - BasicJsonType& get_checked(BasicJsonType* ptr) const - { - for (const auto& reference_token : reference_tokens) - { - switch (ptr->type()) - { - case detail::value_t::object: - { - // note: at performs range check - ptr = &ptr->at(reference_token); - break; - } - - case detail::value_t::array: - { - if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) - { - // "-" always fails the range check - JSON_THROW(detail::out_of_range::create(402, - "array index '-' (" + std::to_string(ptr->m_value.array->size()) + - ") is out of range", *ptr)); - } - - // note: at performs range check - ptr = &ptr->at(array_index(reference_token)); - break; - } - - default: - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); - } - } - - return *ptr; - } - - /*! 
- @brief return a const reference to the pointed to value - - @param[in] ptr a JSON value - - @return const reference to the JSON value pointed to by the JSON - pointer - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.402 if the array index '-' is used - @throw out_of_range.404 if the JSON pointer can not be resolved - */ - const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const - { - for (const auto& reference_token : reference_tokens) - { - switch (ptr->type()) - { - case detail::value_t::object: - { - // use unchecked object access - ptr = &ptr->operator[](reference_token); - break; - } - - case detail::value_t::array: - { - if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) - { - // "-" cannot be used for const access - JSON_THROW(detail::out_of_range::create(402, "array index '-' (" + std::to_string(ptr->m_value.array->size()) + ") is out of range", *ptr)); - } - - // use unchecked array access - ptr = &ptr->operator[](array_index(reference_token)); - break; - } - - default: - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); - } - } - - return *ptr; - } - - /*! 
- @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.402 if the array index '-' is used - @throw out_of_range.404 if the JSON pointer can not be resolved - */ - const BasicJsonType& get_checked(const BasicJsonType* ptr) const - { - for (const auto& reference_token : reference_tokens) - { - switch (ptr->type()) - { - case detail::value_t::object: - { - // note: at performs range check - ptr = &ptr->at(reference_token); - break; - } - - case detail::value_t::array: - { - if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) - { - // "-" always fails the range check - JSON_THROW(detail::out_of_range::create(402, - "array index '-' (" + std::to_string(ptr->m_value.array->size()) + - ") is out of range", *ptr)); - } - - // note: at performs range check - ptr = &ptr->at(array_index(reference_token)); - break; - } - - default: - JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'", *ptr)); - } - } - - return *ptr; - } - - /*! 
- @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - */ - bool contains(const BasicJsonType* ptr) const - { - for (const auto& reference_token : reference_tokens) - { - switch (ptr->type()) - { - case detail::value_t::object: - { - if (!ptr->contains(reference_token)) - { - // we did not find the key in the object - return false; - } - - ptr = &ptr->operator[](reference_token); - break; - } - - case detail::value_t::array: - { - if (JSON_HEDLEY_UNLIKELY(reference_token == "-")) - { - // "-" always fails the range check - return false; - } - if (JSON_HEDLEY_UNLIKELY(reference_token.size() == 1 && !("0" <= reference_token && reference_token <= "9"))) - { - // invalid char - return false; - } - if (JSON_HEDLEY_UNLIKELY(reference_token.size() > 1)) - { - if (JSON_HEDLEY_UNLIKELY(!('1' <= reference_token[0] && reference_token[0] <= '9'))) - { - // first char should be between '1' and '9' - return false; - } - for (std::size_t i = 1; i < reference_token.size(); i++) - { - if (JSON_HEDLEY_UNLIKELY(!('0' <= reference_token[i] && reference_token[i] <= '9'))) - { - // other char should be between '0' and '9' - return false; - } - } - } - - const auto idx = array_index(reference_token); - if (idx >= ptr->size()) - { - // index out of range - return false; - } - - ptr = &ptr->operator[](idx); - break; - } - - default: - { - // we do not expect primitive values if there is still a - // reference token to process - return false; - } - } - } - - // no reference token left means we found a primitive value - return true; - } - - /*! - @brief split the string input to reference tokens - - @note This function is only called by the json_pointer constructor. - All exceptions below are documented there. 
- - @throw parse_error.107 if the pointer is not empty or begins with '/' - @throw parse_error.108 if character '~' is not followed by '0' or '1' - */ - static std::vector split(const std::string& reference_string) - { - std::vector result; - - // special case: empty reference string -> no reference tokens - if (reference_string.empty()) - { - return result; - } - - // check if nonempty reference string begins with slash - if (JSON_HEDLEY_UNLIKELY(reference_string[0] != '/')) - { - JSON_THROW(detail::parse_error::create(107, 1, "JSON pointer must be empty or begin with '/' - was: '" + reference_string + "'", BasicJsonType())); - } - - // extract the reference tokens: - // - slash: position of the last read slash (or end of string) - // - start: position after the previous slash - for ( - // search for the first slash after the first character - std::size_t slash = reference_string.find_first_of('/', 1), - // set the beginning of the first reference token - start = 1; - // we can stop if start == 0 (if slash == std::string::npos) - start != 0; - // set the beginning of the next reference token - // (will eventually be 0 if slash == std::string::npos) - start = (slash == std::string::npos) ? 0 : slash + 1, - // find next slash - slash = reference_string.find_first_of('/', start)) - { - // use the text between the beginning of the reference token - // (start) and the last slash (slash). 
- auto reference_token = reference_string.substr(start, slash - start); - - // check reference tokens are properly escaped - for (std::size_t pos = reference_token.find_first_of('~'); - pos != std::string::npos; - pos = reference_token.find_first_of('~', pos + 1)) - { - JSON_ASSERT(reference_token[pos] == '~'); - - // ~ must be followed by 0 or 1 - if (JSON_HEDLEY_UNLIKELY(pos == reference_token.size() - 1 || - (reference_token[pos + 1] != '0' && - reference_token[pos + 1] != '1'))) - { - JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'", BasicJsonType())); - } - } - - // finally, store the reference token - detail::unescape(reference_token); - result.push_back(reference_token); - } - - return result; - } - - private: - /*! - @param[in] reference_string the reference string to the current value - @param[in] value the value to consider - @param[in,out] result the result object to insert values to - - @note Empty objects or arrays are flattened to `null`. 
- */ - static void flatten(const std::string& reference_string, - const BasicJsonType& value, - BasicJsonType& result) - { - switch (value.type()) - { - case detail::value_t::array: - { - if (value.m_value.array->empty()) - { - // flatten empty array as null - result[reference_string] = nullptr; - } - else - { - // iterate array and use index as reference string - for (std::size_t i = 0; i < value.m_value.array->size(); ++i) - { - flatten(reference_string + "/" + std::to_string(i), - value.m_value.array->operator[](i), result); - } - } - break; - } - - case detail::value_t::object: - { - if (value.m_value.object->empty()) - { - // flatten empty object as null - result[reference_string] = nullptr; - } - else - { - // iterate object and use keys as reference string - for (const auto& element : *value.m_value.object) - { - flatten(reference_string + "/" + detail::escape(element.first), element.second, result); - } - } - break; - } - - default: - { - // add primitive value with its reference string - result[reference_string] = value; - break; - } - } - } - - /*! 
- @param[in] value flattened JSON - - @return unflattened JSON - - @throw parse_error.109 if array index is not a number - @throw type_error.314 if value is not an object - @throw type_error.315 if object values are not primitive - @throw type_error.313 if value cannot be unflattened - */ - static BasicJsonType - unflatten(const BasicJsonType& value) - { - if (JSON_HEDLEY_UNLIKELY(!value.is_object())) - { - JSON_THROW(detail::type_error::create(314, "only objects can be unflattened", value)); - } - - BasicJsonType result; - - // iterate the JSON object values - for (const auto& element : *value.m_value.object) - { - if (JSON_HEDLEY_UNLIKELY(!element.second.is_primitive())) - { - JSON_THROW(detail::type_error::create(315, "values in object must be primitive", element.second)); - } - - // assign value to reference pointed to by JSON pointer; Note that if - // the JSON pointer is "" (i.e., points to the whole value), function - // get_and_create returns a reference to result itself. An assignment - // will then create a primitive value. - json_pointer(element.first).get_and_create(result) = element.second; - } - - return result; - } - - /*! - @brief compares two JSON pointers for equality - - @param[in] lhs JSON pointer to compare - @param[in] rhs JSON pointer to compare - @return whether @a lhs is equal to @a rhs - - @complexity Linear in the length of the JSON pointer - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - */ - friend bool operator==(json_pointer const& lhs, - json_pointer const& rhs) noexcept - { - return lhs.reference_tokens == rhs.reference_tokens; - } - - /*! - @brief compares two JSON pointers for inequality - - @param[in] lhs JSON pointer to compare - @param[in] rhs JSON pointer to compare - @return whether @a lhs is not equal @a rhs - - @complexity Linear in the length of the JSON pointer - - @exceptionsafety No-throw guarantee: this function never throws exceptions. 
- */ - friend bool operator!=(json_pointer const& lhs, - json_pointer const& rhs) noexcept - { - return !(lhs == rhs); - } - - /// the reference tokens - std::vector reference_tokens; -}; -} // namespace nlohmann - -// #include - - -#include -#include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -template -class json_ref -{ - public: - using value_type = BasicJsonType; - - json_ref(value_type&& value) - : owned_value(std::move(value)) - {} - - json_ref(const value_type& value) - : value_ref(&value) - {} - - json_ref(std::initializer_list init) - : owned_value(init) - {} - - template < - class... Args, - enable_if_t::value, int> = 0 > - json_ref(Args && ... args) - : owned_value(std::forward(args)...) - {} - - // class should be movable only - json_ref(json_ref&&) = default; - json_ref(const json_ref&) = delete; - json_ref& operator=(const json_ref&) = delete; - json_ref& operator=(json_ref&&) = delete; - ~json_ref() = default; - - value_type moved_or_copied() const - { - if (value_ref == nullptr) - { - return std::move(owned_value); - } - return *value_ref; - } - - value_type const& operator*() const - { - return value_ref ? 
*value_ref : owned_value; - } - - value_type const* operator->() const - { - return &** this; - } - - private: - mutable value_type owned_value = nullptr; - value_type const* value_ref = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - -// #include - - -#include // reverse -#include // array -#include // uint8_t, uint16_t, uint32_t, uint64_t -#include // memcpy -#include // numeric_limits -#include // string -#include // isnan, isinf - -// #include - -// #include - -// #include - - -#include // copy -#include // size_t -#include // streamsize -#include // back_inserter -#include // shared_ptr, make_shared -#include // basic_ostream -#include // basic_string -#include // vector -// #include - - -namespace nlohmann -{ -namespace detail -{ -/// abstract output adapter interface -template struct output_adapter_protocol -{ - virtual void write_character(CharType c) = 0; - virtual void write_characters(const CharType* s, std::size_t length) = 0; - virtual ~output_adapter_protocol() = default; -}; - -/// a type to simplify interfaces -template -using output_adapter_t = std::shared_ptr>; - -/// output adapter for byte vectors -template -class output_vector_adapter : public output_adapter_protocol -{ - public: - explicit output_vector_adapter(std::vector& vec) noexcept - : v(vec) - {} - - void write_character(CharType c) override - { - v.push_back(c); - } - - JSON_HEDLEY_NON_NULL(2) - void write_characters(const CharType* s, std::size_t length) override - { - std::copy(s, s + length, std::back_inserter(v)); - } - - private: - std::vector& v; -}; - -/// output adapter for output streams -template -class output_stream_adapter : public output_adapter_protocol -{ - public: - explicit output_stream_adapter(std::basic_ostream& s) noexcept - : stream(s) - {} - - void write_character(CharType c) override - { - stream.put(c); - } - - JSON_HEDLEY_NON_NULL(2) - void write_characters(const CharType* s, std::size_t length) 
override - { - stream.write(s, static_cast(length)); - } - - private: - std::basic_ostream& stream; -}; - -/// output adapter for basic_string -template> -class output_string_adapter : public output_adapter_protocol -{ - public: - explicit output_string_adapter(StringType& s) noexcept - : str(s) - {} - - void write_character(CharType c) override - { - str.push_back(c); - } - - JSON_HEDLEY_NON_NULL(2) - void write_characters(const CharType* s, std::size_t length) override - { - str.append(s, length); - } - - private: - StringType& str; -}; - -template> -class output_adapter -{ - public: - output_adapter(std::vector& vec) - : oa(std::make_shared>(vec)) {} - - output_adapter(std::basic_ostream& s) - : oa(std::make_shared>(s)) {} - - output_adapter(StringType& s) - : oa(std::make_shared>(s)) {} - - operator output_adapter_t() - { - return oa; - } - - private: - output_adapter_t oa = nullptr; -}; -} // namespace detail -} // namespace nlohmann - - -namespace nlohmann -{ -namespace detail -{ -/////////////////// -// binary writer // -/////////////////// - -/*! -@brief serialization to CBOR and MessagePack values -*/ -template -class binary_writer -{ - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using number_float_t = typename BasicJsonType::number_float_t; - - public: - /*! - @brief create a binary writer - - @param[in] adapter output adapter to write to - */ - explicit binary_writer(output_adapter_t adapter) : oa(adapter) - { - JSON_ASSERT(oa); - } - - /*! - @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - void write_bson(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::object: - { - write_bson_object(*j.m_value.object); - break; - } - - default: - { - JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name()), j));; - } - } - } - - /*! 
- @param[in] j JSON value to serialize - */ - void write_cbor(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::null: - { - oa->write_character(to_char_type(0xF6)); - break; - } - - case value_t::boolean: - { - oa->write_character(j.m_value.boolean - ? to_char_type(0xF5) - : to_char_type(0xF4)); - break; - } - - case value_t::number_integer: - { - if (j.m_value.number_integer >= 0) - { - // CBOR does not differentiate between positive signed - // integers and unsigned integers. Therefore, we used the - // code from the value_t::number_unsigned case here. - if (j.m_value.number_integer <= 0x17) - { - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x18)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x19)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x1A)); - write_number(static_cast(j.m_value.number_integer)); - } - else - { - oa->write_character(to_char_type(0x1B)); - write_number(static_cast(j.m_value.number_integer)); - } - } - else - { - // The conversions below encode the sign in the first - // byte, and the value is converted to a positive number. 
- const auto positive_number = -1 - j.m_value.number_integer; - if (j.m_value.number_integer >= -24) - { - write_number(static_cast(0x20 + positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x38)); - write_number(static_cast(positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x39)); - write_number(static_cast(positive_number)); - } - else if (positive_number <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x3A)); - write_number(static_cast(positive_number)); - } - else - { - oa->write_character(to_char_type(0x3B)); - write_number(static_cast(positive_number)); - } - } - break; - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned <= 0x17) - { - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x18)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x19)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x1A)); - write_number(static_cast(j.m_value.number_unsigned)); - } - else - { - oa->write_character(to_char_type(0x1B)); - write_number(static_cast(j.m_value.number_unsigned)); - } - break; - } - - case value_t::number_float: - { - if (std::isnan(j.m_value.number_float)) - { - // NaN is 0xf97e00 in CBOR - oa->write_character(to_char_type(0xF9)); - oa->write_character(to_char_type(0x7E)); - oa->write_character(to_char_type(0x00)); - } - else if (std::isinf(j.m_value.number_float)) - { - // Infinity is 0xf97c00, -Infinity is 0xf9fc00 - oa->write_character(to_char_type(0xf9)); - oa->write_character(j.m_value.number_float > 0 ? 
to_char_type(0x7C) : to_char_type(0xFC)); - oa->write_character(to_char_type(0x00)); - } - else - { - write_compact_float(j.m_value.number_float, detail::input_format_t::cbor); - } - break; - } - - case value_t::string: - { - // step 1: write control byte and the string length - const auto N = j.m_value.string->size(); - if (N <= 0x17) - { - write_number(static_cast(0x60 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x78)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x79)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x7A)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x7B)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write the string - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - // step 1: write control byte and the array size - const auto N = j.m_value.array->size(); - if (N <= 0x17) - { - write_number(static_cast(0x80 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x98)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x99)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x9A)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x9B)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write each element - for (const auto& el : *j.m_value.array) - { - write_cbor(el); - } - break; - } - - case value_t::binary: - { - if 
(j.m_value.binary->has_subtype()) - { - write_number(static_cast(0xd8)); - write_number(j.m_value.binary->subtype()); - } - - // step 1: write control byte and the binary array size - const auto N = j.m_value.binary->size(); - if (N <= 0x17) - { - write_number(static_cast(0x40 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x58)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x59)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x5A)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0x5B)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write each element - oa->write_characters( - reinterpret_cast(j.m_value.binary->data()), - N); - - break; - } - - case value_t::object: - { - // step 1: write control byte and the object size - const auto N = j.m_value.object->size(); - if (N <= 0x17) - { - write_number(static_cast(0xA0 + N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xB8)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xB9)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xBA)); - write_number(static_cast(N)); - } - // LCOV_EXCL_START - else if (N <= (std::numeric_limits::max)()) - { - oa->write_character(to_char_type(0xBB)); - write_number(static_cast(N)); - } - // LCOV_EXCL_STOP - - // step 2: write each element - for (const auto& el : *j.m_value.object) - { - write_cbor(el.first); - write_cbor(el.second); - } - break; - } - - default: - break; - } - } - - /*! 
- @param[in] j JSON value to serialize - */ - void write_msgpack(const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::null: // nil - { - oa->write_character(to_char_type(0xC0)); - break; - } - - case value_t::boolean: // true and false - { - oa->write_character(j.m_value.boolean - ? to_char_type(0xC3) - : to_char_type(0xC2)); - break; - } - - case value_t::number_integer: - { - if (j.m_value.number_integer >= 0) - { - // MessagePack does not differentiate between positive - // signed integers and unsigned integers. Therefore, we used - // the code from the value_t::number_unsigned case here. - if (j.m_value.number_unsigned < 128) - { - // positive fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 8 - oa->write_character(to_char_type(0xCC)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 16 - oa->write_character(to_char_type(0xCD)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 32 - oa->write_character(to_char_type(0xCE)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 64 - oa->write_character(to_char_type(0xCF)); - write_number(static_cast(j.m_value.number_integer)); - } - } - else - { - if (j.m_value.number_integer >= -32) - { - // negative fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() && - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 8 - oa->write_character(to_char_type(0xD0)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() && - j.m_value.number_integer <= 
(std::numeric_limits::max)()) - { - // int 16 - oa->write_character(to_char_type(0xD1)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() && - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 32 - oa->write_character(to_char_type(0xD2)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_integer >= (std::numeric_limits::min)() && - j.m_value.number_integer <= (std::numeric_limits::max)()) - { - // int 64 - oa->write_character(to_char_type(0xD3)); - write_number(static_cast(j.m_value.number_integer)); - } - } - break; - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned < 128) - { - // positive fixnum - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 8 - oa->write_character(to_char_type(0xCC)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 16 - oa->write_character(to_char_type(0xCD)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 32 - oa->write_character(to_char_type(0xCE)); - write_number(static_cast(j.m_value.number_integer)); - } - else if (j.m_value.number_unsigned <= (std::numeric_limits::max)()) - { - // uint 64 - oa->write_character(to_char_type(0xCF)); - write_number(static_cast(j.m_value.number_integer)); - } - break; - } - - case value_t::number_float: - { - write_compact_float(j.m_value.number_float, detail::input_format_t::msgpack); - break; - } - - case value_t::string: - { - // step 1: write control byte and the string length - const auto N = j.m_value.string->size(); - if (N <= 31) - { - // fixstr - write_number(static_cast(0xA0 | N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 8 - 
oa->write_character(to_char_type(0xD9)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 16 - oa->write_character(to_char_type(0xDA)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // str 32 - oa->write_character(to_char_type(0xDB)); - write_number(static_cast(N)); - } - - // step 2: write the string - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - // step 1: write control byte and the array size - const auto N = j.m_value.array->size(); - if (N <= 15) - { - // fixarray - write_number(static_cast(0x90 | N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // array 16 - oa->write_character(to_char_type(0xDC)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // array 32 - oa->write_character(to_char_type(0xDD)); - write_number(static_cast(N)); - } - - // step 2: write each element - for (const auto& el : *j.m_value.array) - { - write_msgpack(el); - } - break; - } - - case value_t::binary: - { - // step 0: determine if the binary type has a set subtype to - // determine whether or not to use the ext or fixext types - const bool use_ext = j.m_value.binary->has_subtype(); - - // step 1: write control byte and the byte string length - const auto N = j.m_value.binary->size(); - if (N <= (std::numeric_limits::max)()) - { - std::uint8_t output_type{}; - bool fixed = true; - if (use_ext) - { - switch (N) - { - case 1: - output_type = 0xD4; // fixext 1 - break; - case 2: - output_type = 0xD5; // fixext 2 - break; - case 4: - output_type = 0xD6; // fixext 4 - break; - case 8: - output_type = 0xD7; // fixext 8 - break; - case 16: - output_type = 0xD8; // fixext 16 - break; - default: - output_type = 0xC7; // ext 8 - fixed = false; - break; - } - - } - else - { - output_type = 0xC4; // bin 8 - fixed = false; - } - - 
oa->write_character(to_char_type(output_type)); - if (!fixed) - { - write_number(static_cast(N)); - } - } - else if (N <= (std::numeric_limits::max)()) - { - std::uint8_t output_type = use_ext - ? 0xC8 // ext 16 - : 0xC5; // bin 16 - - oa->write_character(to_char_type(output_type)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - std::uint8_t output_type = use_ext - ? 0xC9 // ext 32 - : 0xC6; // bin 32 - - oa->write_character(to_char_type(output_type)); - write_number(static_cast(N)); - } - - // step 1.5: if this is an ext type, write the subtype - if (use_ext) - { - write_number(static_cast(j.m_value.binary->subtype())); - } - - // step 2: write the byte string - oa->write_characters( - reinterpret_cast(j.m_value.binary->data()), - N); - - break; - } - - case value_t::object: - { - // step 1: write control byte and the object size - const auto N = j.m_value.object->size(); - if (N <= 15) - { - // fixmap - write_number(static_cast(0x80 | (N & 0xF))); - } - else if (N <= (std::numeric_limits::max)()) - { - // map 16 - oa->write_character(to_char_type(0xDE)); - write_number(static_cast(N)); - } - else if (N <= (std::numeric_limits::max)()) - { - // map 32 - oa->write_character(to_char_type(0xDF)); - write_number(static_cast(N)); - } - - // step 2: write each element - for (const auto& el : *j.m_value.object) - { - write_msgpack(el.first); - write_msgpack(el.second); - } - break; - } - - default: - break; - } - } - - /*! 
- @param[in] j JSON value to serialize - @param[in] use_count whether to use '#' prefixes (optimized format) - @param[in] use_type whether to use '$' prefixes (optimized format) - @param[in] add_prefix whether prefixes need to be used for this value - */ - void write_ubjson(const BasicJsonType& j, const bool use_count, - const bool use_type, const bool add_prefix = true) - { - switch (j.type()) - { - case value_t::null: - { - if (add_prefix) - { - oa->write_character(to_char_type('Z')); - } - break; - } - - case value_t::boolean: - { - if (add_prefix) - { - oa->write_character(j.m_value.boolean - ? to_char_type('T') - : to_char_type('F')); - } - break; - } - - case value_t::number_integer: - { - write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix); - break; - } - - case value_t::number_unsigned: - { - write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix); - break; - } - - case value_t::number_float: - { - write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix); - break; - } - - case value_t::string: - { - if (add_prefix) - { - oa->write_character(to_char_type('S')); - } - write_number_with_ubjson_prefix(j.m_value.string->size(), true); - oa->write_characters( - reinterpret_cast(j.m_value.string->c_str()), - j.m_value.string->size()); - break; - } - - case value_t::array: - { - if (add_prefix) - { - oa->write_character(to_char_type('[')); - } - - bool prefix_required = true; - if (use_type && !j.m_value.array->empty()) - { - JSON_ASSERT(use_count); - const CharType first_prefix = ubjson_prefix(j.front()); - const bool same_prefix = std::all_of(j.begin() + 1, j.end(), - [this, first_prefix](const BasicJsonType & v) - { - return ubjson_prefix(v) == first_prefix; - }); - - if (same_prefix) - { - prefix_required = false; - oa->write_character(to_char_type('$')); - oa->write_character(first_prefix); - } - } - - if (use_count) - { - oa->write_character(to_char_type('#')); - 
write_number_with_ubjson_prefix(j.m_value.array->size(), true); - } - - for (const auto& el : *j.m_value.array) - { - write_ubjson(el, use_count, use_type, prefix_required); - } - - if (!use_count) - { - oa->write_character(to_char_type(']')); - } - - break; - } - - case value_t::binary: - { - if (add_prefix) - { - oa->write_character(to_char_type('[')); - } - - if (use_type && !j.m_value.binary->empty()) - { - JSON_ASSERT(use_count); - oa->write_character(to_char_type('$')); - oa->write_character('U'); - } - - if (use_count) - { - oa->write_character(to_char_type('#')); - write_number_with_ubjson_prefix(j.m_value.binary->size(), true); - } - - if (use_type) - { - oa->write_characters( - reinterpret_cast(j.m_value.binary->data()), - j.m_value.binary->size()); - } - else - { - for (size_t i = 0; i < j.m_value.binary->size(); ++i) - { - oa->write_character(to_char_type('U')); - oa->write_character(j.m_value.binary->data()[i]); - } - } - - if (!use_count) - { - oa->write_character(to_char_type(']')); - } - - break; - } - - case value_t::object: - { - if (add_prefix) - { - oa->write_character(to_char_type('{')); - } - - bool prefix_required = true; - if (use_type && !j.m_value.object->empty()) - { - JSON_ASSERT(use_count); - const CharType first_prefix = ubjson_prefix(j.front()); - const bool same_prefix = std::all_of(j.begin(), j.end(), - [this, first_prefix](const BasicJsonType & v) - { - return ubjson_prefix(v) == first_prefix; - }); - - if (same_prefix) - { - prefix_required = false; - oa->write_character(to_char_type('$')); - oa->write_character(first_prefix); - } - } - - if (use_count) - { - oa->write_character(to_char_type('#')); - write_number_with_ubjson_prefix(j.m_value.object->size(), true); - } - - for (const auto& el : *j.m_value.object) - { - write_number_with_ubjson_prefix(el.first.size(), true); - oa->write_characters( - reinterpret_cast(el.first.c_str()), - el.first.size()); - write_ubjson(el.second, use_count, use_type, prefix_required); - } - - if 
(!use_count) - { - oa->write_character(to_char_type('}')); - } - - break; - } - - default: - break; - } - } - - private: - ////////// - // BSON // - ////////// - - /*! - @return The size of a BSON document entry header, including the id marker - and the entry name size (and its null-terminator). - */ - static std::size_t calc_bson_entry_header_size(const string_t& name, const BasicJsonType& j) - { - const auto it = name.find(static_cast(0)); - if (JSON_HEDLEY_UNLIKELY(it != BasicJsonType::string_t::npos)) - { - JSON_THROW(out_of_range::create(409, "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")", j)); - } - - return /*id*/ 1ul + name.size() + /*zero-terminator*/1u; - } - - /*! - @brief Writes the given @a element_type and @a name to the output adapter - */ - void write_bson_entry_header(const string_t& name, - const std::uint8_t element_type) - { - oa->write_character(to_char_type(element_type)); // boolean - oa->write_characters( - reinterpret_cast(name.c_str()), - name.size() + 1u); - } - - /*! - @brief Writes a BSON element with key @a name and boolean value @a value - */ - void write_bson_boolean(const string_t& name, - const bool value) - { - write_bson_entry_header(name, 0x08); - oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00)); - } - - /*! - @brief Writes a BSON element with key @a name and double value @a value - */ - void write_bson_double(const string_t& name, - const double value) - { - write_bson_entry_header(name, 0x01); - write_number(value); - } - - /*! - @return The size of the BSON-encoded string in @a value - */ - static std::size_t calc_bson_string_size(const string_t& value) - { - return sizeof(std::int32_t) + value.size() + 1ul; - } - - /*! 
- @brief Writes a BSON element with key @a name and string value @a value - */ - void write_bson_string(const string_t& name, - const string_t& value) - { - write_bson_entry_header(name, 0x02); - - write_number(static_cast(value.size() + 1ul)); - oa->write_characters( - reinterpret_cast(value.c_str()), - value.size() + 1); - } - - /*! - @brief Writes a BSON element with key @a name and null value - */ - void write_bson_null(const string_t& name) - { - write_bson_entry_header(name, 0x0A); - } - - /*! - @return The size of the BSON-encoded integer @a value - */ - static std::size_t calc_bson_integer_size(const std::int64_t value) - { - return (std::numeric_limits::min)() <= value && value <= (std::numeric_limits::max)() - ? sizeof(std::int32_t) - : sizeof(std::int64_t); - } - - /*! - @brief Writes a BSON element with key @a name and integer @a value - */ - void write_bson_integer(const string_t& name, - const std::int64_t value) - { - if ((std::numeric_limits::min)() <= value && value <= (std::numeric_limits::max)()) - { - write_bson_entry_header(name, 0x10); // int32 - write_number(static_cast(value)); - } - else - { - write_bson_entry_header(name, 0x12); // int64 - write_number(static_cast(value)); - } - } - - /*! - @return The size of the BSON-encoded unsigned integer in @a j - */ - static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept - { - return (value <= static_cast((std::numeric_limits::max)())) - ? sizeof(std::int32_t) - : sizeof(std::int64_t); - } - - /*! 
- @brief Writes a BSON element with key @a name and unsigned @a value - */ - void write_bson_unsigned(const string_t& name, - const BasicJsonType& j) - { - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - write_bson_entry_header(name, 0x10 /* int32 */); - write_number(static_cast(j.m_value.number_unsigned)); - } - else if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - write_bson_entry_header(name, 0x12 /* int64 */); - write_number(static_cast(j.m_value.number_unsigned)); - } - else - { - JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(j.m_value.number_unsigned) + " cannot be represented by BSON as it does not fit int64", j)); - } - } - - /*! - @brief Writes a BSON element with key @a name and object @a value - */ - void write_bson_object_entry(const string_t& name, - const typename BasicJsonType::object_t& value) - { - write_bson_entry_header(name, 0x03); // object - write_bson_object(value); - } - - /*! - @return The size of the BSON-encoded array @a value - */ - static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value) - { - std::size_t array_index = 0ul; - - const std::size_t embedded_document_size = std::accumulate(std::begin(value), std::end(value), std::size_t(0), [&array_index](std::size_t result, const typename BasicJsonType::array_t::value_type & el) - { - return result + calc_bson_element_size(std::to_string(array_index++), el); - }); - - return sizeof(std::int32_t) + embedded_document_size + 1ul; - } - - /*! - @return The size of the BSON-encoded binary array @a value - */ - static std::size_t calc_bson_binary_size(const typename BasicJsonType::binary_t& value) - { - return sizeof(std::int32_t) + value.size() + 1ul; - } - - /*! 
- @brief Writes a BSON element with key @a name and array @a value - */ - void write_bson_array(const string_t& name, - const typename BasicJsonType::array_t& value) - { - write_bson_entry_header(name, 0x04); // array - write_number(static_cast(calc_bson_array_size(value))); - - std::size_t array_index = 0ul; - - for (const auto& el : value) - { - write_bson_element(std::to_string(array_index++), el); - } - - oa->write_character(to_char_type(0x00)); - } - - /*! - @brief Writes a BSON element with key @a name and binary value @a value - */ - void write_bson_binary(const string_t& name, - const binary_t& value) - { - write_bson_entry_header(name, 0x05); - - write_number(static_cast(value.size())); - write_number(value.has_subtype() ? value.subtype() : std::uint8_t(0x00)); - - oa->write_characters(reinterpret_cast(value.data()), value.size()); - } - - /*! - @brief Calculates the size necessary to serialize the JSON value @a j with its @a name - @return The calculated size for the BSON document entry for @a j with the given @a name. 
- */ - static std::size_t calc_bson_element_size(const string_t& name, - const BasicJsonType& j) - { - const auto header_size = calc_bson_entry_header_size(name, j); - switch (j.type()) - { - case value_t::object: - return header_size + calc_bson_object_size(*j.m_value.object); - - case value_t::array: - return header_size + calc_bson_array_size(*j.m_value.array); - - case value_t::binary: - return header_size + calc_bson_binary_size(*j.m_value.binary); - - case value_t::boolean: - return header_size + 1ul; - - case value_t::number_float: - return header_size + 8ul; - - case value_t::number_integer: - return header_size + calc_bson_integer_size(j.m_value.number_integer); - - case value_t::number_unsigned: - return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned); - - case value_t::string: - return header_size + calc_bson_string_size(*j.m_value.string); - - case value_t::null: - return header_size + 0ul; - - // LCOV_EXCL_START - default: - JSON_ASSERT(false); - return 0ul; - // LCOV_EXCL_STOP - } - } - - /*! - @brief Serializes the JSON value @a j to BSON and associates it with the - key @a name. 
- @param name The name to associate with the JSON entity @a j within the - current BSON document - @return The size of the BSON entry - */ - void write_bson_element(const string_t& name, - const BasicJsonType& j) - { - switch (j.type()) - { - case value_t::object: - return write_bson_object_entry(name, *j.m_value.object); - - case value_t::array: - return write_bson_array(name, *j.m_value.array); - - case value_t::binary: - return write_bson_binary(name, *j.m_value.binary); - - case value_t::boolean: - return write_bson_boolean(name, j.m_value.boolean); - - case value_t::number_float: - return write_bson_double(name, j.m_value.number_float); - - case value_t::number_integer: - return write_bson_integer(name, j.m_value.number_integer); - - case value_t::number_unsigned: - return write_bson_unsigned(name, j); - - case value_t::string: - return write_bson_string(name, *j.m_value.string); - - case value_t::null: - return write_bson_null(name); - - // LCOV_EXCL_START - default: - JSON_ASSERT(false); - return; - // LCOV_EXCL_STOP - } - } - - /*! - @brief Calculates the size of the BSON serialization of the given - JSON-object @a j. - @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value) - { - std::size_t document_size = std::accumulate(value.begin(), value.end(), std::size_t(0), - [](size_t result, const typename BasicJsonType::object_t::value_type & el) - { - return result += calc_bson_element_size(el.first, el.second); - }); - - return sizeof(std::int32_t) + document_size + 1ul; - } - - /*! 
- @param[in] j JSON value to serialize - @pre j.type() == value_t::object - */ - void write_bson_object(const typename BasicJsonType::object_t& value) - { - write_number(static_cast(calc_bson_object_size(value))); - - for (const auto& el : value) - { - write_bson_element(el.first, el.second); - } - - oa->write_character(to_char_type(0x00)); - } - - ////////// - // CBOR // - ////////// - - static constexpr CharType get_cbor_float_prefix(float /*unused*/) - { - return to_char_type(0xFA); // Single-Precision Float - } - - static constexpr CharType get_cbor_float_prefix(double /*unused*/) - { - return to_char_type(0xFB); // Double-Precision Float - } - - ///////////// - // MsgPack // - ///////////// - - static constexpr CharType get_msgpack_float_prefix(float /*unused*/) - { - return to_char_type(0xCA); // float 32 - } - - static constexpr CharType get_msgpack_float_prefix(double /*unused*/) - { - return to_char_type(0xCB); // float 64 - } - - //////////// - // UBJSON // - //////////// - - // UBJSON: write number (floating point) - template::value, int>::type = 0> - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if (add_prefix) - { - oa->write_character(get_ubjson_float_prefix(n)); - } - write_number(n); - } - - // UBJSON: write number (unsigned integer) - template::value, int>::type = 0> - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('i')); // int8 - } - write_number(static_cast(n)); - } - else if (n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('U')); // uint8 - } - write_number(static_cast(n)); - } - else if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('I')); // int16 - } - write_number(static_cast(n)); - } - else if (n <= 
static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('l')); // int32 - } - write_number(static_cast(n)); - } - else if (n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('L')); // int64 - } - write_number(static_cast(n)); - } - else - { - if (add_prefix) - { - oa->write_character(to_char_type('H')); // high-precision number - } - - const auto number = BasicJsonType(n).dump(); - write_number_with_ubjson_prefix(number.size(), true); - for (std::size_t i = 0; i < number.size(); ++i) - { - oa->write_character(to_char_type(static_cast(number[i]))); - } - } - } - - // UBJSON: write number (signed integer) - template < typename NumberType, typename std::enable_if < - std::is_signed::value&& - !std::is_floating_point::value, int >::type = 0 > - void write_number_with_ubjson_prefix(const NumberType n, - const bool add_prefix) - { - if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('i')); // int8 - } - write_number(static_cast(n)); - } - else if (static_cast((std::numeric_limits::min)()) <= n && n <= static_cast((std::numeric_limits::max)())) - { - if (add_prefix) - { - oa->write_character(to_char_type('U')); // uint8 - } - write_number(static_cast(n)); - } - else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('I')); // int16 - } - write_number(static_cast(n)); - } - else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('l')); // int32 - } - write_number(static_cast(n)); - } - else if ((std::numeric_limits::min)() <= n && n <= (std::numeric_limits::max)()) - { - if (add_prefix) - { - oa->write_character(to_char_type('L')); // int64 - } - write_number(static_cast(n)); - } - // LCOV_EXCL_START - else - { - if 
(add_prefix) - { - oa->write_character(to_char_type('H')); // high-precision number - } - - const auto number = BasicJsonType(n).dump(); - write_number_with_ubjson_prefix(number.size(), true); - for (std::size_t i = 0; i < number.size(); ++i) - { - oa->write_character(to_char_type(static_cast(number[i]))); - } - } - // LCOV_EXCL_STOP - } - - /*! - @brief determine the type prefix of container values - */ - CharType ubjson_prefix(const BasicJsonType& j) const noexcept - { - switch (j.type()) - { - case value_t::null: - return 'Z'; - - case value_t::boolean: - return j.m_value.boolean ? 'T' : 'F'; - - case value_t::number_integer: - { - if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'i'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'U'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'I'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'l'; - } - if ((std::numeric_limits::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits::max)()) - { - return 'L'; - } - // anything else is treated as high-precision number - return 'H'; // LCOV_EXCL_LINE - } - - case value_t::number_unsigned: - { - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - return 'i'; - } - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - return 'U'; - } - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - return 'I'; - } - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - return 'l'; - } - if (j.m_value.number_unsigned <= static_cast((std::numeric_limits::max)())) - { - return 'L'; - 
} - // anything else is treated as high-precision number - return 'H'; // LCOV_EXCL_LINE - } - - case value_t::number_float: - return get_ubjson_float_prefix(j.m_value.number_float); - - case value_t::string: - return 'S'; - - case value_t::array: // fallthrough - case value_t::binary: - return '['; - - case value_t::object: - return '{'; - - default: // discarded values - return 'N'; - } - } - - static constexpr CharType get_ubjson_float_prefix(float /*unused*/) - { - return 'd'; // float 32 - } - - static constexpr CharType get_ubjson_float_prefix(double /*unused*/) - { - return 'D'; // float 64 - } - - /////////////////////// - // Utility functions // - /////////////////////// - - /* - @brief write a number to output input - @param[in] n number of type @a NumberType - @tparam NumberType the type of the number - @tparam OutputIsLittleEndian Set to true if output data is - required to be little endian - - @note This function needs to respect the system's endianess, because bytes - in CBOR, MessagePack, and UBJSON are stored in network order (big - endian) and therefore need reordering on little endian systems. - */ - template - void write_number(const NumberType n) - { - // step 1: write number to array of length NumberType - std::array vec; - std::memcpy(vec.data(), &n, sizeof(NumberType)); - - // step 2: write array to output (with possible reordering) - if (is_little_endian != OutputIsLittleEndian) - { - // reverse byte order prior to conversion if necessary - std::reverse(vec.begin(), vec.end()); - } - - oa->write_characters(vec.data(), sizeof(NumberType)); - } - - void write_compact_float(const number_float_t n, detail::input_format_t format) - { - if (static_cast(n) >= static_cast(std::numeric_limits::lowest()) && - static_cast(n) <= static_cast((std::numeric_limits::max)()) && - static_cast(static_cast(n)) == static_cast(n)) - { - oa->write_character(format == detail::input_format_t::cbor - ? 
get_cbor_float_prefix(static_cast(n)) - : get_msgpack_float_prefix(static_cast(n))); - write_number(static_cast(n)); - } - else - { - oa->write_character(format == detail::input_format_t::cbor - ? get_cbor_float_prefix(n) - : get_msgpack_float_prefix(n)); - write_number(n); - } - } - - public: - // The following to_char_type functions are implement the conversion - // between uint8_t and CharType. In case CharType is not unsigned, - // such a conversion is required to allow values greater than 128. - // See for a discussion. - template < typename C = CharType, - enable_if_t < std::is_signed::value && std::is_signed::value > * = nullptr > - static constexpr CharType to_char_type(std::uint8_t x) noexcept - { - return *reinterpret_cast(&x); - } - - template < typename C = CharType, - enable_if_t < std::is_signed::value && std::is_unsigned::value > * = nullptr > - static CharType to_char_type(std::uint8_t x) noexcept - { - static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t"); - static_assert(std::is_trivial::value, "CharType must be trivial"); - CharType result; - std::memcpy(&result, &x, sizeof(x)); - return result; - } - - template::value>* = nullptr> - static constexpr CharType to_char_type(std::uint8_t x) noexcept - { - return x; - } - - template < typename InputCharType, typename C = CharType, - enable_if_t < - std::is_signed::value && - std::is_signed::value && - std::is_same::type>::value - > * = nullptr > - static constexpr CharType to_char_type(InputCharType x) noexcept - { - return x; - } - - private: - /// whether we can assume little endianess - const bool is_little_endian = little_endianess(); - - /// the output - output_adapter_t oa = nullptr; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - - -#include // reverse, remove, fill, find, none_of -#include // array -#include // localeconv, lconv -#include // labs, isfinite, isnan, signbit -#include // size_t, ptrdiff_t 
-#include // uint8_t -#include // snprintf -#include // numeric_limits -#include // string, char_traits -#include // is_same -#include // move - -// #include - - -#include // array -#include // signbit, isfinite -#include // intN_t, uintN_t -#include // memcpy, memmove -#include // numeric_limits -#include // conditional - -// #include - - -namespace nlohmann -{ -namespace detail -{ - -/*! -@brief implements the Grisu2 algorithm for binary to decimal floating-point -conversion. - -This implementation is a slightly modified version of the reference -implementation which may be obtained from -http://florian.loitsch.com/publications (bench.tar.gz). - -The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch. - -For a detailed description of the algorithm see: - -[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with - Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming - Language Design and Implementation, PLDI 2010 -[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately", - Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language - Design and Implementation, PLDI 1996 -*/ -namespace dtoa_impl -{ - -template -Target reinterpret_bits(const Source source) -{ - static_assert(sizeof(Target) == sizeof(Source), "size mismatch"); - - Target target; - std::memcpy(&target, &source, sizeof(Source)); - return target; -} - -struct diyfp // f * 2^e -{ - static constexpr int kPrecision = 64; // = q - - std::uint64_t f = 0; - int e = 0; - - constexpr diyfp(std::uint64_t f_, int e_) noexcept : f(f_), e(e_) {} - - /*! - @brief returns x - y - @pre x.e == y.e and x.f >= y.f - */ - static diyfp sub(const diyfp& x, const diyfp& y) noexcept - { - JSON_ASSERT(x.e == y.e); - JSON_ASSERT(x.f >= y.f); - - return {x.f - y.f, x.e}; - } - - /*! - @brief returns x * y - @note The result is rounded. (Only the upper q bits are returned.) 
- */ - static diyfp mul(const diyfp& x, const diyfp& y) noexcept - { - static_assert(kPrecision == 64, "internal error"); - - // Computes: - // f = round((x.f * y.f) / 2^q) - // e = x.e + y.e + q - - // Emulate the 64-bit * 64-bit multiplication: - // - // p = u * v - // = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi) - // = (u_lo v_lo ) + 2^32 ((u_lo v_hi ) + (u_hi v_lo )) + 2^64 (u_hi v_hi ) - // = (p0 ) + 2^32 ((p1 ) + (p2 )) + 2^64 (p3 ) - // = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3 ) - // = (p0_lo ) + 2^32 (p0_hi + p1_lo + p2_lo ) + 2^64 (p1_hi + p2_hi + p3) - // = (p0_lo ) + 2^32 (Q ) + 2^64 (H ) - // = (p0_lo ) + 2^32 (Q_lo + 2^32 Q_hi ) + 2^64 (H ) - // - // (Since Q might be larger than 2^32 - 1) - // - // = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H) - // - // (Q_hi + H does not overflow a 64-bit int) - // - // = p_lo + 2^64 p_hi - - const std::uint64_t u_lo = x.f & 0xFFFFFFFFu; - const std::uint64_t u_hi = x.f >> 32u; - const std::uint64_t v_lo = y.f & 0xFFFFFFFFu; - const std::uint64_t v_hi = y.f >> 32u; - - const std::uint64_t p0 = u_lo * v_lo; - const std::uint64_t p1 = u_lo * v_hi; - const std::uint64_t p2 = u_hi * v_lo; - const std::uint64_t p3 = u_hi * v_hi; - - const std::uint64_t p0_hi = p0 >> 32u; - const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu; - const std::uint64_t p1_hi = p1 >> 32u; - const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu; - const std::uint64_t p2_hi = p2 >> 32u; - - std::uint64_t Q = p0_hi + p1_lo + p2_lo; - - // The full product might now be computed as - // - // p_hi = p3 + p2_hi + p1_hi + (Q >> 32) - // p_lo = p0_lo + (Q << 32) - // - // But in this particular case here, the full p_lo is not required. - // Effectively we only need to add the highest bit in p_lo to p_hi (and - // Q_hi + 1 does not overflow). - - Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up - - const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u); - - return {h, x.e + y.e + 64}; - } - - /*! 
- @brief normalize x such that the significand is >= 2^(q-1) - @pre x.f != 0 - */ - static diyfp normalize(diyfp x) noexcept - { - JSON_ASSERT(x.f != 0); - - while ((x.f >> 63u) == 0) - { - x.f <<= 1u; - x.e--; - } - - return x; - } - - /*! - @brief normalize x such that the result has the exponent E - @pre e >= x.e and the upper e - x.e bits of x.f must be zero. - */ - static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept - { - const int delta = x.e - target_exponent; - - JSON_ASSERT(delta >= 0); - JSON_ASSERT(((x.f << delta) >> delta) == x.f); - - return {x.f << delta, target_exponent}; - } -}; - -struct boundaries -{ - diyfp w; - diyfp minus; - diyfp plus; -}; - -/*! -Compute the (normalized) diyfp representing the input number 'value' and its -boundaries. - -@pre value must be finite and positive -*/ -template -boundaries compute_boundaries(FloatType value) -{ - JSON_ASSERT(std::isfinite(value)); - JSON_ASSERT(value > 0); - - // Convert the IEEE representation into a diyfp. - // - // If v is denormal: - // value = 0.F * 2^(1 - bias) = ( F) * 2^(1 - bias - (p-1)) - // If v is normalized: - // value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1)) - - static_assert(std::numeric_limits::is_iec559, - "internal error: dtoa_short requires an IEEE-754 floating-point implementation"); - - constexpr int kPrecision = std::numeric_limits::digits; // = p (includes the hidden bit) - constexpr int kBias = std::numeric_limits::max_exponent - 1 + (kPrecision - 1); - constexpr int kMinExp = 1 - kBias; - constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1) - - using bits_type = typename std::conditional::type; - - const std::uint64_t bits = reinterpret_bits(value); - const std::uint64_t E = bits >> (kPrecision - 1); - const std::uint64_t F = bits & (kHiddenBit - 1); - - const bool is_denormal = E == 0; - const diyfp v = is_denormal - ? 
diyfp(F, kMinExp) - : diyfp(F + kHiddenBit, static_cast(E) - kBias); - - // Compute the boundaries m- and m+ of the floating-point value - // v = f * 2^e. - // - // Determine v- and v+, the floating-point predecessor and successor if v, - // respectively. - // - // v- = v - 2^e if f != 2^(p-1) or e == e_min (A) - // = v - 2^(e-1) if f == 2^(p-1) and e > e_min (B) - // - // v+ = v + 2^e - // - // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_ - // between m- and m+ round to v, regardless of how the input rounding - // algorithm breaks ties. - // - // ---+-------------+-------------+-------------+-------------+--- (A) - // v- m- v m+ v+ - // - // -----------------+------+------+-------------+-------------+--- (B) - // v- m- v m+ v+ - - const bool lower_boundary_is_closer = F == 0 && E > 1; - const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); - const diyfp m_minus = lower_boundary_is_closer - ? diyfp(4 * v.f - 1, v.e - 2) // (B) - : diyfp(2 * v.f - 1, v.e - 1); // (A) - - // Determine the normalized w+ = m+. - const diyfp w_plus = diyfp::normalize(m_plus); - - // Determine w- = m- such that e_(w-) = e_(w+). - const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e); - - return {diyfp::normalize(v), w_minus, w_plus}; -} - -// Given normalized diyfp w, Grisu needs to find a (normalized) cached -// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies -// within a certain range [alpha, gamma] (Definition 3.2 from [1]) -// -// alpha <= e = e_c + e_w + q <= gamma -// -// or -// -// f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q -// <= f_c * f_w * 2^gamma -// -// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies -// -// 2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma -// -// or -// -// 2^(q - 2 + alpha) <= c * w < 2^(q + gamma) -// -// The choice of (alpha,gamma) determines the size of the table and the form of -// the digit generation procedure. 
Using (alpha,gamma)=(-60,-32) works out well -// in practice: -// -// The idea is to cut the number c * w = f * 2^e into two parts, which can be -// processed independently: An integral part p1, and a fractional part p2: -// -// f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e -// = (f div 2^-e) + (f mod 2^-e) * 2^e -// = p1 + p2 * 2^e -// -// The conversion of p1 into decimal form requires a series of divisions and -// modulos by (a power of) 10. These operations are faster for 32-bit than for -// 64-bit integers, so p1 should ideally fit into a 32-bit integer. This can be -// achieved by choosing -// -// -e >= 32 or e <= -32 := gamma -// -// In order to convert the fractional part -// -// p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ... -// -// into decimal form, the fraction is repeatedly multiplied by 10 and the digits -// d[-i] are extracted in order: -// -// (10 * p2) div 2^-e = d[-1] -// (10 * p2) mod 2^-e = d[-2] / 10^1 + ... -// -// The multiplication by 10 must not overflow. It is sufficient to choose -// -// 10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64. -// -// Since p2 = f mod 2^-e < 2^-e, -// -// -e <= 60 or e >= -60 := alpha - -constexpr int kAlpha = -60; -constexpr int kGamma = -32; - -struct cached_power // c = f * 2^e ~= 10^k -{ - std::uint64_t f; - int e; - int k; -}; - -/*! -For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached -power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c -satisfies (Definition 3.2 from [1]) - - alpha <= e_c + e + q <= gamma. -*/ -inline cached_power get_cached_power_for_binary_exponent(int e) -{ - // Now - // - // alpha <= e_c + e + q <= gamma (1) - // ==> f_c * 2^alpha <= c * 2^e * 2^q - // - // and since the c's are normalized, 2^(q-1) <= f_c, - // - // ==> 2^(q - 1 + alpha) <= c * 2^(e + q) - // ==> 2^(alpha - e - 1) <= c - // - // If c were an exact power of ten, i.e. 
c = 10^k, one may determine k as - // - // k = ceil( log_10( 2^(alpha - e - 1) ) ) - // = ceil( (alpha - e - 1) * log_10(2) ) - // - // From the paper: - // "In theory the result of the procedure could be wrong since c is rounded, - // and the computation itself is approximated [...]. In practice, however, - // this simple function is sufficient." - // - // For IEEE double precision floating-point numbers converted into - // normalized diyfp's w = f * 2^e, with q = 64, - // - // e >= -1022 (min IEEE exponent) - // -52 (p - 1) - // -52 (p - 1, possibly normalize denormal IEEE numbers) - // -11 (normalize the diyfp) - // = -1137 - // - // and - // - // e <= +1023 (max IEEE exponent) - // -52 (p - 1) - // -11 (normalize the diyfp) - // = 960 - // - // This binary exponent range [-1137,960] results in a decimal exponent - // range [-307,324]. One does not need to store a cached power for each - // k in this range. For each such k it suffices to find a cached power - // such that the exponent of the product lies in [alpha,gamma]. - // This implies that the difference of the decimal exponents of adjacent - // table entries must be less than or equal to - // - // floor( (gamma - alpha) * log_10(2) ) = 8. - // - // (A smaller distance gamma-alpha would require a larger table.) - - // NB: - // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34. 
- - constexpr int kCachedPowersMinDecExp = -300; - constexpr int kCachedPowersDecStep = 8; - - static constexpr std::array kCachedPowers = - { - { - { 0xAB70FE17C79AC6CA, -1060, -300 }, - { 0xFF77B1FCBEBCDC4F, -1034, -292 }, - { 0xBE5691EF416BD60C, -1007, -284 }, - { 0x8DD01FAD907FFC3C, -980, -276 }, - { 0xD3515C2831559A83, -954, -268 }, - { 0x9D71AC8FADA6C9B5, -927, -260 }, - { 0xEA9C227723EE8BCB, -901, -252 }, - { 0xAECC49914078536D, -874, -244 }, - { 0x823C12795DB6CE57, -847, -236 }, - { 0xC21094364DFB5637, -821, -228 }, - { 0x9096EA6F3848984F, -794, -220 }, - { 0xD77485CB25823AC7, -768, -212 }, - { 0xA086CFCD97BF97F4, -741, -204 }, - { 0xEF340A98172AACE5, -715, -196 }, - { 0xB23867FB2A35B28E, -688, -188 }, - { 0x84C8D4DFD2C63F3B, -661, -180 }, - { 0xC5DD44271AD3CDBA, -635, -172 }, - { 0x936B9FCEBB25C996, -608, -164 }, - { 0xDBAC6C247D62A584, -582, -156 }, - { 0xA3AB66580D5FDAF6, -555, -148 }, - { 0xF3E2F893DEC3F126, -529, -140 }, - { 0xB5B5ADA8AAFF80B8, -502, -132 }, - { 0x87625F056C7C4A8B, -475, -124 }, - { 0xC9BCFF6034C13053, -449, -116 }, - { 0x964E858C91BA2655, -422, -108 }, - { 0xDFF9772470297EBD, -396, -100 }, - { 0xA6DFBD9FB8E5B88F, -369, -92 }, - { 0xF8A95FCF88747D94, -343, -84 }, - { 0xB94470938FA89BCF, -316, -76 }, - { 0x8A08F0F8BF0F156B, -289, -68 }, - { 0xCDB02555653131B6, -263, -60 }, - { 0x993FE2C6D07B7FAC, -236, -52 }, - { 0xE45C10C42A2B3B06, -210, -44 }, - { 0xAA242499697392D3, -183, -36 }, - { 0xFD87B5F28300CA0E, -157, -28 }, - { 0xBCE5086492111AEB, -130, -20 }, - { 0x8CBCCC096F5088CC, -103, -12 }, - { 0xD1B71758E219652C, -77, -4 }, - { 0x9C40000000000000, -50, 4 }, - { 0xE8D4A51000000000, -24, 12 }, - { 0xAD78EBC5AC620000, 3, 20 }, - { 0x813F3978F8940984, 30, 28 }, - { 0xC097CE7BC90715B3, 56, 36 }, - { 0x8F7E32CE7BEA5C70, 83, 44 }, - { 0xD5D238A4ABE98068, 109, 52 }, - { 0x9F4F2726179A2245, 136, 60 }, - { 0xED63A231D4C4FB27, 162, 68 }, - { 0xB0DE65388CC8ADA8, 189, 76 }, - { 0x83C7088E1AAB65DB, 216, 84 }, - { 0xC45D1DF942711D9A, 242, 92 }, - { 
0x924D692CA61BE758, 269, 100 }, - { 0xDA01EE641A708DEA, 295, 108 }, - { 0xA26DA3999AEF774A, 322, 116 }, - { 0xF209787BB47D6B85, 348, 124 }, - { 0xB454E4A179DD1877, 375, 132 }, - { 0x865B86925B9BC5C2, 402, 140 }, - { 0xC83553C5C8965D3D, 428, 148 }, - { 0x952AB45CFA97A0B3, 455, 156 }, - { 0xDE469FBD99A05FE3, 481, 164 }, - { 0xA59BC234DB398C25, 508, 172 }, - { 0xF6C69A72A3989F5C, 534, 180 }, - { 0xB7DCBF5354E9BECE, 561, 188 }, - { 0x88FCF317F22241E2, 588, 196 }, - { 0xCC20CE9BD35C78A5, 614, 204 }, - { 0x98165AF37B2153DF, 641, 212 }, - { 0xE2A0B5DC971F303A, 667, 220 }, - { 0xA8D9D1535CE3B396, 694, 228 }, - { 0xFB9B7CD9A4A7443C, 720, 236 }, - { 0xBB764C4CA7A44410, 747, 244 }, - { 0x8BAB8EEFB6409C1A, 774, 252 }, - { 0xD01FEF10A657842C, 800, 260 }, - { 0x9B10A4E5E9913129, 827, 268 }, - { 0xE7109BFBA19C0C9D, 853, 276 }, - { 0xAC2820D9623BF429, 880, 284 }, - { 0x80444B5E7AA7CF85, 907, 292 }, - { 0xBF21E44003ACDD2D, 933, 300 }, - { 0x8E679C2F5E44FF8F, 960, 308 }, - { 0xD433179D9C8CB841, 986, 316 }, - { 0x9E19DB92B4E31BA9, 1013, 324 }, - } - }; - - // This computation gives exactly the same results for k as - // k = ceil((kAlpha - e - 1) * 0.30102999566398114) - // for |e| <= 1500, but doesn't require floating-point operations. - // NB: log_10(2) ~= 78913 / 2^18 - JSON_ASSERT(e >= -1500); - JSON_ASSERT(e <= 1500); - const int f = kAlpha - e - 1; - const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); - - const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; - JSON_ASSERT(index >= 0); - JSON_ASSERT(static_cast(index) < kCachedPowers.size()); - - const cached_power cached = kCachedPowers[static_cast(index)]; - JSON_ASSERT(kAlpha <= cached.e + e + 64); - JSON_ASSERT(kGamma >= cached.e + e + 64); - - return cached; -} - -/*! -For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k. -For n == 0, returns 1 and sets pow10 := 1. 
-*/ -inline int find_largest_pow10(const std::uint32_t n, std::uint32_t& pow10) -{ - // LCOV_EXCL_START - if (n >= 1000000000) - { - pow10 = 1000000000; - return 10; - } - // LCOV_EXCL_STOP - if (n >= 100000000) - { - pow10 = 100000000; - return 9; - } - if (n >= 10000000) - { - pow10 = 10000000; - return 8; - } - if (n >= 1000000) - { - pow10 = 1000000; - return 7; - } - if (n >= 100000) - { - pow10 = 100000; - return 6; - } - if (n >= 10000) - { - pow10 = 10000; - return 5; - } - if (n >= 1000) - { - pow10 = 1000; - return 4; - } - if (n >= 100) - { - pow10 = 100; - return 3; - } - if (n >= 10) - { - pow10 = 10; - return 2; - } - - pow10 = 1; - return 1; -} - -inline void grisu2_round(char* buf, int len, std::uint64_t dist, std::uint64_t delta, - std::uint64_t rest, std::uint64_t ten_k) -{ - JSON_ASSERT(len >= 1); - JSON_ASSERT(dist <= delta); - JSON_ASSERT(rest <= delta); - JSON_ASSERT(ten_k > 0); - - // <--------------------------- delta ----> - // <---- dist ---------> - // --------------[------------------+-------------------]-------------- - // M- w M+ - // - // ten_k - // <------> - // <---- rest ----> - // --------------[------------------+----+--------------]-------------- - // w V - // = buf * 10^k - // - // ten_k represents a unit-in-the-last-place in the decimal representation - // stored in buf. - // Decrement buf by ten_k while this takes buf closer to w. - - // The tests are written in this order to avoid overflow in unsigned - // integer arithmetic. - - while (rest < dist - && delta - rest >= ten_k - && (rest + ten_k < dist || dist - rest > rest + ten_k - dist)) - { - JSON_ASSERT(buf[len - 1] != '0'); - buf[len - 1]--; - rest += ten_k; - } -} - -/*! -Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+. -M- and M+ must be normalized and share the same exponent -60 <= e <= -32. 
-*/ -inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent, - diyfp M_minus, diyfp w, diyfp M_plus) -{ - static_assert(kAlpha >= -60, "internal error"); - static_assert(kGamma <= -32, "internal error"); - - // Generates the digits (and the exponent) of a decimal floating-point - // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's - // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma. - // - // <--------------------------- delta ----> - // <---- dist ---------> - // --------------[------------------+-------------------]-------------- - // M- w M+ - // - // Grisu2 generates the digits of M+ from left to right and stops as soon as - // V is in [M-,M+]. - - JSON_ASSERT(M_plus.e >= kAlpha); - JSON_ASSERT(M_plus.e <= kGamma); - - std::uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e) - std::uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e) - - // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0): - // - // M+ = f * 2^e - // = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e - // = ((p1 ) * 2^-e + (p2 )) * 2^e - // = p1 + p2 * 2^e - - const diyfp one(std::uint64_t{1} << -M_plus.e, M_plus.e); - - auto p1 = static_cast(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.) 
- std::uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e - - // 1) - // - // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0] - - JSON_ASSERT(p1 > 0); - - std::uint32_t pow10; - const int k = find_largest_pow10(p1, pow10); - - // 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1) - // - // p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1)) - // = (d[k-1] ) * 10^(k-1) + (p1 mod 10^(k-1)) - // - // M+ = p1 + p2 * 2^e - // = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1)) + p2 * 2^e - // = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e - // = d[k-1] * 10^(k-1) + ( rest) * 2^e - // - // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0) - // - // p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0] - // - // but stop as soon as - // - // rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e - - int n = k; - while (n > 0) - { - // Invariants: - // M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k) - // pow10 = 10^(n-1) <= p1 < 10^n - // - const std::uint32_t d = p1 / pow10; // d = p1 div 10^(n-1) - const std::uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1) - // - // M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e - // = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e) - // - JSON_ASSERT(d <= 9); - buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d - // - // M+ = buffer * 10^(n-1) + (r + p2 * 2^e) - // - p1 = r; - n--; - // - // M+ = buffer * 10^n + (p1 + p2 * 2^e) - // pow10 = 10^n - // - - // Now check if enough digits have been generated. - // Compute - // - // p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e - // - // Note: - // Since rest and delta share the same exponent e, it suffices to - // compare the significands. - const std::uint64_t rest = (std::uint64_t{p1} << -one.e) + p2; - if (rest <= delta) - { - // V = buffer * 10^n, with M- <= V <= M+. - - decimal_exponent += n; - - // We may now just stop. But instead look if the buffer could be - // decremented to bring V closer to w. 
- // - // pow10 = 10^n is now 1 ulp in the decimal representation V. - // The rounding procedure works with diyfp's with an implicit - // exponent of e. - // - // 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e - // - const std::uint64_t ten_n = std::uint64_t{pow10} << -one.e; - grisu2_round(buffer, length, dist, delta, rest, ten_n); - - return; - } - - pow10 /= 10; - // - // pow10 = 10^(n-1) <= p1 < 10^n - // Invariants restored. - } - - // 2) - // - // The digits of the integral part have been generated: - // - // M+ = d[k-1]...d[1]d[0] + p2 * 2^e - // = buffer + p2 * 2^e - // - // Now generate the digits of the fractional part p2 * 2^e. - // - // Note: - // No decimal point is generated: the exponent is adjusted instead. - // - // p2 actually represents the fraction - // - // p2 * 2^e - // = p2 / 2^-e - // = d[-1] / 10^1 + d[-2] / 10^2 + ... - // - // Now generate the digits d[-m] of p1 from left to right (m = 1,2,...) - // - // p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m - // + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...) - // - // using - // - // 10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e) - // = ( d) * 2^-e + ( r) - // - // or - // 10^m * p2 * 2^e = d + r * 2^e - // - // i.e. - // - // M+ = buffer + p2 * 2^e - // = buffer + 10^-m * (d + r * 2^e) - // = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e - // - // and stop as soon as 10^-m * r * 2^e <= delta * 2^e - - JSON_ASSERT(p2 > delta); - - int m = 0; - for (;;) - { - // Invariant: - // M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) 
* 2^e - // = buffer * 10^-m + 10^-m * (p2 ) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e - // - JSON_ASSERT(p2 <= (std::numeric_limits::max)() / 10); - p2 *= 10; - const std::uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e - const std::uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e - // - // M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e - // = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e)) - // = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e - // - JSON_ASSERT(d <= 9); - buffer[length++] = static_cast('0' + d); // buffer := buffer * 10 + d - // - // M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e - // - p2 = r; - m++; - // - // M+ = buffer * 10^-m + 10^-m * p2 * 2^e - // Invariant restored. - - // Check if enough digits have been generated. - // - // 10^-m * p2 * 2^e <= delta * 2^e - // p2 * 2^e <= 10^m * delta * 2^e - // p2 <= 10^m * delta - delta *= 10; - dist *= 10; - if (p2 <= delta) - { - break; - } - } - - // V = buffer * 10^-m, with M- <= V <= M+. - - decimal_exponent -= m; - - // 1 ulp in the decimal representation is now 10^-m. - // Since delta and dist are now scaled by 10^m, we need to do the - // same with ulp in order to keep the units in sync. - // - // 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e - // - const std::uint64_t ten_m = one.f; - grisu2_round(buffer, length, dist, delta, p2, ten_m); - - // By construction this algorithm generates the shortest possible decimal - // number (Loitsch, Theorem 6.2) which rounds back to w. - // For an input number of precision p, at least - // - // N = 1 + ceil(p * log_10(2)) - // - // decimal digits are sufficient to identify all binary floating-point - // numbers (Matula, "In-and-Out conversions"). - // This implies that the algorithm does not produce more than N decimal - // digits. 
- // - // N = 17 for p = 53 (IEEE double precision) - // N = 9 for p = 24 (IEEE single precision) -} - -/*! -v = buf * 10^decimal_exponent -len is the length of the buffer (number of decimal digits) -The buffer must be large enough, i.e. >= max_digits10. -*/ -JSON_HEDLEY_NON_NULL(1) -inline void grisu2(char* buf, int& len, int& decimal_exponent, - diyfp m_minus, diyfp v, diyfp m_plus) -{ - JSON_ASSERT(m_plus.e == m_minus.e); - JSON_ASSERT(m_plus.e == v.e); - - // --------(-----------------------+-----------------------)-------- (A) - // m- v m+ - // - // --------------------(-----------+-----------------------)-------- (B) - // m- v m+ - // - // First scale v (and m- and m+) such that the exponent is in the range - // [alpha, gamma]. - - const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e); - - const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k - - // The exponent of the products is = v.e + c_minus_k.e + q and is in the range [alpha,gamma] - const diyfp w = diyfp::mul(v, c_minus_k); - const diyfp w_minus = diyfp::mul(m_minus, c_minus_k); - const diyfp w_plus = diyfp::mul(m_plus, c_minus_k); - - // ----(---+---)---------------(---+---)---------------(---+---)---- - // w- w w+ - // = c*m- = c*v = c*m+ - // - // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and - // w+ are now off by a small amount. - // In fact: - // - // w - v * 10^k < 1 ulp - // - // To account for this inaccuracy, add resp. subtract 1 ulp. - // - // --------+---[---------------(---+---)---------------]---+-------- - // w- M- w M+ w+ - // - // Now any number in [M-, M+] (bounds included) will round to w when input, - // regardless of how the input rounding algorithm breaks ties. - // - // And digit_gen generates the shortest possible such number in [M-, M+]. - // Note that this does not mean that Grisu2 always generates the shortest - // possible number in the interval (m-, m+). 
- const diyfp M_minus(w_minus.f + 1, w_minus.e); - const diyfp M_plus (w_plus.f - 1, w_plus.e ); - - decimal_exponent = -cached.k; // = -(-k) = k - - grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus); -} - -/*! -v = buf * 10^decimal_exponent -len is the length of the buffer (number of decimal digits) -The buffer must be large enough, i.e. >= max_digits10. -*/ -template -JSON_HEDLEY_NON_NULL(1) -void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value) -{ - static_assert(diyfp::kPrecision >= std::numeric_limits::digits + 3, - "internal error: not enough precision"); - - JSON_ASSERT(std::isfinite(value)); - JSON_ASSERT(value > 0); - - // If the neighbors (and boundaries) of 'value' are always computed for double-precision - // numbers, all float's can be recovered using strtod (and strtof). However, the resulting - // decimal representations are not exactly "short". - // - // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars) - // says "value is converted to a string as if by std::sprintf in the default ("C") locale" - // and since sprintf promotes float's to double's, I think this is exactly what 'std::to_chars' - // does. - // On the other hand, the documentation for 'std::to_chars' requires that "parsing the - // representation using the corresponding std::from_chars function recovers value exactly". That - // indicates that single precision floating-point numbers should be recovered using - // 'std::strtof'. - // - // NB: If the neighbors are computed for single-precision numbers, there is a single float - // (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision - // value is off by 1 ulp. -#if 0 - const boundaries w = compute_boundaries(static_cast(value)); -#else - const boundaries w = compute_boundaries(value); -#endif - - grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus); -} - -/*! 
-@brief appends a decimal representation of e to buf -@return a pointer to the element following the exponent. -@pre -1000 < e < 1000 -*/ -JSON_HEDLEY_NON_NULL(1) -JSON_HEDLEY_RETURNS_NON_NULL -inline char* append_exponent(char* buf, int e) -{ - JSON_ASSERT(e > -1000); - JSON_ASSERT(e < 1000); - - if (e < 0) - { - e = -e; - *buf++ = '-'; - } - else - { - *buf++ = '+'; - } - - auto k = static_cast(e); - if (k < 10) - { - // Always print at least two digits in the exponent. - // This is for compatibility with printf("%g"). - *buf++ = '0'; - *buf++ = static_cast('0' + k); - } - else if (k < 100) - { - *buf++ = static_cast('0' + k / 10); - k %= 10; - *buf++ = static_cast('0' + k); - } - else - { - *buf++ = static_cast('0' + k / 100); - k %= 100; - *buf++ = static_cast('0' + k / 10); - k %= 10; - *buf++ = static_cast('0' + k); - } - - return buf; -} - -/*! -@brief prettify v = buf * 10^decimal_exponent - -If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point -notation. Otherwise it will be printed in exponential notation. - -@pre min_exp < 0 -@pre max_exp > 0 -*/ -JSON_HEDLEY_NON_NULL(1) -JSON_HEDLEY_RETURNS_NON_NULL -inline char* format_buffer(char* buf, int len, int decimal_exponent, - int min_exp, int max_exp) -{ - JSON_ASSERT(min_exp < 0); - JSON_ASSERT(max_exp > 0); - - const int k = len; - const int n = len + decimal_exponent; - - // v = buf * 10^(n-k) - // k is the length of the buffer (number of decimal digits) - // n is the position of the decimal point relative to the start of the buffer. 
- - if (k <= n && n <= max_exp) - { - // digits[000] - // len <= max_exp + 2 - - std::memset(buf + k, '0', static_cast(n) - static_cast(k)); - // Make it look like a floating-point number (#362, #378) - buf[n + 0] = '.'; - buf[n + 1] = '0'; - return buf + (static_cast(n) + 2); - } - - if (0 < n && n <= max_exp) - { - // dig.its - // len <= max_digits10 + 1 - - JSON_ASSERT(k > n); - - std::memmove(buf + (static_cast(n) + 1), buf + n, static_cast(k) - static_cast(n)); - buf[n] = '.'; - return buf + (static_cast(k) + 1U); - } - - if (min_exp < n && n <= 0) - { - // 0.[000]digits - // len <= 2 + (-min_exp - 1) + max_digits10 - - std::memmove(buf + (2 + static_cast(-n)), buf, static_cast(k)); - buf[0] = '0'; - buf[1] = '.'; - std::memset(buf + 2, '0', static_cast(-n)); - return buf + (2U + static_cast(-n) + static_cast(k)); - } - - if (k == 1) - { - // dE+123 - // len <= 1 + 5 - - buf += 1; - } - else - { - // d.igitsE+123 - // len <= max_digits10 + 1 + 5 - - std::memmove(buf + 2, buf + 1, static_cast(k) - 1); - buf[1] = '.'; - buf += 1 + static_cast(k); - } - - *buf++ = 'e'; - return append_exponent(buf, n - 1); -} - -} // namespace dtoa_impl - -/*! -@brief generates a decimal representation of the floating-point number value in [first, last). - -The format of the resulting decimal representation is similar to printf's %g -format. Returns an iterator pointing past-the-end of the decimal representation. - -@note The input number must be finite, i.e. NaN's and Inf's are not supported. -@note The buffer must be large enough. -@note The result is NOT null-terminated. -*/ -template -JSON_HEDLEY_NON_NULL(1, 2) -JSON_HEDLEY_RETURNS_NON_NULL -char* to_chars(char* first, const char* last, FloatType value) -{ - static_cast(last); // maybe unused - fix warning - JSON_ASSERT(std::isfinite(value)); - - // Use signbit(value) instead of (value < 0) since signbit works for -0. 
- if (std::signbit(value)) - { - value = -value; - *first++ = '-'; - } - - if (value == 0) // +-0 - { - *first++ = '0'; - // Make it look like a floating-point number (#362, #378) - *first++ = '.'; - *first++ = '0'; - return first; - } - - JSON_ASSERT(last - first >= std::numeric_limits::max_digits10); - - // Compute v = buffer * 10^decimal_exponent. - // The decimal digits are stored in the buffer, which needs to be interpreted - // as an unsigned decimal integer. - // len is the length of the buffer, i.e. the number of decimal digits. - int len = 0; - int decimal_exponent = 0; - dtoa_impl::grisu2(first, len, decimal_exponent, value); - - JSON_ASSERT(len <= std::numeric_limits::max_digits10); - - // Format the buffer like printf("%.*g", prec, value) - constexpr int kMinExp = -4; - // Use digits10 here to increase compatibility with version 2. - constexpr int kMaxExp = std::numeric_limits::digits10; - - JSON_ASSERT(last - first >= kMaxExp + 2); - JSON_ASSERT(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits::max_digits10); - JSON_ASSERT(last - first >= std::numeric_limits::max_digits10 + 6); - - return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp); -} - -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - -// #include - -// #include - -// #include - - -namespace nlohmann -{ -namespace detail -{ -/////////////////// -// serialization // -/////////////////// - -/// how to treat decoding errors -enum class error_handler_t -{ - strict, ///< throw a type_error exception in case of invalid UTF-8 - replace, ///< replace invalid UTF-8 sequences with U+FFFD - ignore ///< ignore invalid UTF-8 sequences -}; - -template -class serializer -{ - using string_t = typename BasicJsonType::string_t; - using number_float_t = typename BasicJsonType::number_float_t; - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using 
binary_char_t = typename BasicJsonType::binary_t::value_type; - static constexpr std::uint8_t UTF8_ACCEPT = 0; - static constexpr std::uint8_t UTF8_REJECT = 1; - - public: - /*! - @param[in] s output stream to serialize to - @param[in] ichar indentation character to use - @param[in] error_handler_ how to react on decoding errors - */ - serializer(output_adapter_t s, const char ichar, - error_handler_t error_handler_ = error_handler_t::strict) - : o(std::move(s)) - , loc(std::localeconv()) - , thousands_sep(loc->thousands_sep == nullptr ? '\0' : std::char_traits::to_char_type(* (loc->thousands_sep))) - , decimal_point(loc->decimal_point == nullptr ? '\0' : std::char_traits::to_char_type(* (loc->decimal_point))) - , indent_char(ichar) - , indent_string(512, indent_char) - , error_handler(error_handler_) - {} - - // delete because of pointer members - serializer(const serializer&) = delete; - serializer& operator=(const serializer&) = delete; - serializer(serializer&&) = delete; - serializer& operator=(serializer&&) = delete; - ~serializer() = default; - - /*! - @brief internal implementation of the serialization function - - This function is called by the public member function dump and organizes - the serialization internally. The indentation level is propagated as - additional parameter. In case of arrays and objects, the function is - called recursively. - - - strings and object keys are escaped using `escape_string()` - - integer numbers are converted implicitly via `operator<<` - - floating-point numbers are converted to a string using `"%g"` format - - binary values are serialized as objects containing the subtype and the - byte array - - @param[in] val value to serialize - @param[in] pretty_print whether the output shall be pretty-printed - @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters - in the output are escaped with `\uXXXX` sequences, and the result consists - of ASCII characters only. 
- @param[in] indent_step the indent level - @param[in] current_indent the current indent level (only used internally) - */ - void dump(const BasicJsonType& val, - const bool pretty_print, - const bool ensure_ascii, - const unsigned int indent_step, - const unsigned int current_indent = 0) - { - switch (val.m_type) - { - case value_t::object: - { - if (val.m_value.object->empty()) - { - o->write_characters("{}", 2); - return; - } - - if (pretty_print) - { - o->write_characters("{\n", 2); - - // variable to hold indentation for recursive calls - const auto new_indent = current_indent + indent_step; - if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) - { - indent_string.resize(indent_string.size() * 2, ' '); - } - - // first n-1 elements - auto i = val.m_value.object->cbegin(); - for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) - { - o->write_characters(indent_string.c_str(), new_indent); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\": ", 3); - dump(i->second, true, ensure_ascii, indent_step, new_indent); - o->write_characters(",\n", 2); - } - - // last element - JSON_ASSERT(i != val.m_value.object->cend()); - JSON_ASSERT(std::next(i) == val.m_value.object->cend()); - o->write_characters(indent_string.c_str(), new_indent); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\": ", 3); - dump(i->second, true, ensure_ascii, indent_step, new_indent); - - o->write_character('\n'); - o->write_characters(indent_string.c_str(), current_indent); - o->write_character('}'); - } - else - { - o->write_character('{'); - - // first n-1 elements - auto i = val.m_value.object->cbegin(); - for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i) - { - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\":", 2); - dump(i->second, false, ensure_ascii, indent_step, current_indent); - o->write_character(','); - 
} - - // last element - JSON_ASSERT(i != val.m_value.object->cend()); - JSON_ASSERT(std::next(i) == val.m_value.object->cend()); - o->write_character('\"'); - dump_escaped(i->first, ensure_ascii); - o->write_characters("\":", 2); - dump(i->second, false, ensure_ascii, indent_step, current_indent); - - o->write_character('}'); - } - - return; - } - - case value_t::array: - { - if (val.m_value.array->empty()) - { - o->write_characters("[]", 2); - return; - } - - if (pretty_print) - { - o->write_characters("[\n", 2); - - // variable to hold indentation for recursive calls - const auto new_indent = current_indent + indent_step; - if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) - { - indent_string.resize(indent_string.size() * 2, ' '); - } - - // first n-1 elements - for (auto i = val.m_value.array->cbegin(); - i != val.m_value.array->cend() - 1; ++i) - { - o->write_characters(indent_string.c_str(), new_indent); - dump(*i, true, ensure_ascii, indent_step, new_indent); - o->write_characters(",\n", 2); - } - - // last element - JSON_ASSERT(!val.m_value.array->empty()); - o->write_characters(indent_string.c_str(), new_indent); - dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent); - - o->write_character('\n'); - o->write_characters(indent_string.c_str(), current_indent); - o->write_character(']'); - } - else - { - o->write_character('['); - - // first n-1 elements - for (auto i = val.m_value.array->cbegin(); - i != val.m_value.array->cend() - 1; ++i) - { - dump(*i, false, ensure_ascii, indent_step, current_indent); - o->write_character(','); - } - - // last element - JSON_ASSERT(!val.m_value.array->empty()); - dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent); - - o->write_character(']'); - } - - return; - } - - case value_t::string: - { - o->write_character('\"'); - dump_escaped(*val.m_value.string, ensure_ascii); - o->write_character('\"'); - return; - } - - case value_t::binary: - { - if 
(pretty_print) - { - o->write_characters("{\n", 2); - - // variable to hold indentation for recursive calls - const auto new_indent = current_indent + indent_step; - if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent)) - { - indent_string.resize(indent_string.size() * 2, ' '); - } - - o->write_characters(indent_string.c_str(), new_indent); - - o->write_characters("\"bytes\": [", 10); - - if (!val.m_value.binary->empty()) - { - for (auto i = val.m_value.binary->cbegin(); - i != val.m_value.binary->cend() - 1; ++i) - { - dump_integer(*i); - o->write_characters(", ", 2); - } - dump_integer(val.m_value.binary->back()); - } - - o->write_characters("],\n", 3); - o->write_characters(indent_string.c_str(), new_indent); - - o->write_characters("\"subtype\": ", 11); - if (val.m_value.binary->has_subtype()) - { - dump_integer(val.m_value.binary->subtype()); - } - else - { - o->write_characters("null", 4); - } - o->write_character('\n'); - o->write_characters(indent_string.c_str(), current_indent); - o->write_character('}'); - } - else - { - o->write_characters("{\"bytes\":[", 10); - - if (!val.m_value.binary->empty()) - { - for (auto i = val.m_value.binary->cbegin(); - i != val.m_value.binary->cend() - 1; ++i) - { - dump_integer(*i); - o->write_character(','); - } - dump_integer(val.m_value.binary->back()); - } - - o->write_characters("],\"subtype\":", 12); - if (val.m_value.binary->has_subtype()) - { - dump_integer(val.m_value.binary->subtype()); - o->write_character('}'); - } - else - { - o->write_characters("null}", 5); - } - } - return; - } - - case value_t::boolean: - { - if (val.m_value.boolean) - { - o->write_characters("true", 4); - } - else - { - o->write_characters("false", 5); - } - return; - } - - case value_t::number_integer: - { - dump_integer(val.m_value.number_integer); - return; - } - - case value_t::number_unsigned: - { - dump_integer(val.m_value.number_unsigned); - return; - } - - case value_t::number_float: - { - 
dump_float(val.m_value.number_float); - return; - } - - case value_t::discarded: - { - o->write_characters("", 11); - return; - } - - case value_t::null: - { - o->write_characters("null", 4); - return; - } - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - } - - JSON_PRIVATE_UNLESS_TESTED: - /*! - @brief dump escaped string - - Escape a string by replacing certain special characters by a sequence of an - escape character (backslash) and another character and other control - characters by a sequence of "\u" followed by a four-digit hex - representation. The escaped string is written to output stream @a o. - - @param[in] s the string to escape - @param[in] ensure_ascii whether to escape non-ASCII characters with - \uXXXX sequences - - @complexity Linear in the length of string @a s. - */ - void dump_escaped(const string_t& s, const bool ensure_ascii) - { - std::uint32_t codepoint; - std::uint8_t state = UTF8_ACCEPT; - std::size_t bytes = 0; // number of bytes written to string_buffer - - // number of bytes written at the point of the last valid byte - std::size_t bytes_after_last_accept = 0; - std::size_t undumped_chars = 0; - - for (std::size_t i = 0; i < s.size(); ++i) - { - const auto byte = static_cast(s[i]); - - switch (decode(state, codepoint, byte)) - { - case UTF8_ACCEPT: // decode found a new code point - { - switch (codepoint) - { - case 0x08: // backspace - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'b'; - break; - } - - case 0x09: // horizontal tab - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 't'; - break; - } - - case 0x0A: // newline - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'n'; - break; - } - - case 0x0C: // formfeed - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'f'; - break; - } - - case 0x0D: // carriage return - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'r'; - break; - } - - case 0x22: // quotation mark - { - 
string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = '\"'; - break; - } - - case 0x5C: // reverse solidus - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = '\\'; - break; - } - - default: - { - // escape control characters (0x00..0x1F) or, if - // ensure_ascii parameter is used, non-ASCII characters - if ((codepoint <= 0x1F) || (ensure_ascii && (codepoint >= 0x7F))) - { - if (codepoint <= 0xFFFF) - { - (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x", - static_cast(codepoint)); - bytes += 6; - } - else - { - (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x", - static_cast(0xD7C0u + (codepoint >> 10u)), - static_cast(0xDC00u + (codepoint & 0x3FFu))); - bytes += 12; - } - } - else - { - // copy byte to buffer (all previous bytes - // been copied have in default case above) - string_buffer[bytes++] = s[i]; - } - break; - } - } - - // write buffer and reset index; there must be 13 bytes - // left, as this is the maximal number of bytes to be - // written ("\uxxxx\uxxxx\0") for one code point - if (string_buffer.size() - bytes < 13) - { - o->write_characters(string_buffer.data(), bytes); - bytes = 0; - } - - // remember the byte position of this accept - bytes_after_last_accept = bytes; - undumped_chars = 0; - break; - } - - case UTF8_REJECT: // decode found invalid UTF-8 byte - { - switch (error_handler) - { - case error_handler_t::strict: - { - std::string sn(3, '\0'); - (std::snprintf)(&sn[0], sn.size(), "%.2X", byte); - JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn, BasicJsonType())); - } - - case error_handler_t::ignore: - case error_handler_t::replace: - { - // in case we saw this character the first time, we - // would like to read it again, because the byte - // may be OK for itself, but just not OK for the - // previous sequence - if (undumped_chars > 0) - { - --i; - } - - // reset length buffer to the last accepted index; - // thus removing/ignoring the invalid 
characters - bytes = bytes_after_last_accept; - - if (error_handler == error_handler_t::replace) - { - // add a replacement character - if (ensure_ascii) - { - string_buffer[bytes++] = '\\'; - string_buffer[bytes++] = 'u'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'f'; - string_buffer[bytes++] = 'd'; - } - else - { - string_buffer[bytes++] = detail::binary_writer::to_char_type('\xEF'); - string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBF'); - string_buffer[bytes++] = detail::binary_writer::to_char_type('\xBD'); - } - - // write buffer and reset index; there must be 13 bytes - // left, as this is the maximal number of bytes to be - // written ("\uxxxx\uxxxx\0") for one code point - if (string_buffer.size() - bytes < 13) - { - o->write_characters(string_buffer.data(), bytes); - bytes = 0; - } - - bytes_after_last_accept = bytes; - } - - undumped_chars = 0; - - // continue processing the string - state = UTF8_ACCEPT; - break; - } - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - break; - } - - default: // decode found yet incomplete multi-byte code point - { - if (!ensure_ascii) - { - // code point will not be escaped - copy byte to buffer - string_buffer[bytes++] = s[i]; - } - ++undumped_chars; - break; - } - } - } - - // we finished processing the string - if (JSON_HEDLEY_LIKELY(state == UTF8_ACCEPT)) - { - // write buffer - if (bytes > 0) - { - o->write_characters(string_buffer.data(), bytes); - } - } - else - { - // we finish reading, but do not accept: string was incomplete - switch (error_handler) - { - case error_handler_t::strict: - { - std::string sn(3, '\0'); - (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast(s.back())); - JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn, BasicJsonType())); - } - - case error_handler_t::ignore: - { - // write all accepted bytes - o->write_characters(string_buffer.data(), bytes_after_last_accept); 
- break; - } - - case error_handler_t::replace: - { - // write all accepted bytes - o->write_characters(string_buffer.data(), bytes_after_last_accept); - // add a replacement character - if (ensure_ascii) - { - o->write_characters("\\ufffd", 6); - } - else - { - o->write_characters("\xEF\xBF\xBD", 3); - } - break; - } - - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - } - } - - private: - /*! - @brief count digits - - Count the number of decimal (base 10) digits for an input unsigned integer. - - @param[in] x unsigned integer number to count its digits - @return number of decimal digits - */ - inline unsigned int count_digits(number_unsigned_t x) noexcept - { - unsigned int n_digits = 1; - for (;;) - { - if (x < 10) - { - return n_digits; - } - if (x < 100) - { - return n_digits + 1; - } - if (x < 1000) - { - return n_digits + 2; - } - if (x < 10000) - { - return n_digits + 3; - } - x = x / 10000u; - n_digits += 4; - } - } - - /*! - @brief dump an integer - - Dump a given integer to output stream @a o. Works internally with - @a number_buffer. 
- - @param[in] x integer number (signed or unsigned) to dump - @tparam NumberType either @a number_integer_t or @a number_unsigned_t - */ - template < typename NumberType, detail::enable_if_t < - std::is_same::value || - std::is_same::value || - std::is_same::value, - int > = 0 > - void dump_integer(NumberType x) - { - static constexpr std::array, 100> digits_to_99 - { - { - {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}}, - {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}}, - {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}}, - {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}}, - {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}}, - {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}}, - {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}}, - {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}}, - {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}}, - {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}}, - } - }; - - // special case for "0" - if (x == 0) - { - o->write_character('0'); - return; - } - - // use a pointer to fill the buffer - auto buffer_ptr = number_buffer.begin(); - - const bool 
is_negative = std::is_same::value && !(x >= 0); // see issue #755 - number_unsigned_t abs_value; - - unsigned int n_chars; - - if (is_negative) - { - *buffer_ptr = '-'; - abs_value = remove_sign(static_cast(x)); - - // account one more byte for the minus sign - n_chars = 1 + count_digits(abs_value); - } - else - { - abs_value = static_cast(x); - n_chars = count_digits(abs_value); - } - - // spare 1 byte for '\0' - JSON_ASSERT(n_chars < number_buffer.size() - 1); - - // jump to the end to generate the string from backward - // so we later avoid reversing the result - buffer_ptr += n_chars; - - // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu - // See: https://www.youtube.com/watch?v=o4-CwDo2zpg - while (abs_value >= 100) - { - const auto digits_index = static_cast((abs_value % 100)); - abs_value /= 100; - *(--buffer_ptr) = digits_to_99[digits_index][1]; - *(--buffer_ptr) = digits_to_99[digits_index][0]; - } - - if (abs_value >= 10) - { - const auto digits_index = static_cast(abs_value); - *(--buffer_ptr) = digits_to_99[digits_index][1]; - *(--buffer_ptr) = digits_to_99[digits_index][0]; - } - else - { - *(--buffer_ptr) = static_cast('0' + abs_value); - } - - o->write_characters(number_buffer.data(), n_chars); - } - - /*! - @brief dump a floating-point number - - Dump a given floating-point number to output stream @a o. Works internally - with @a number_buffer. - - @param[in] x floating-point number to dump - */ - void dump_float(number_float_t x) - { - // NaN / inf - if (!std::isfinite(x)) - { - o->write_characters("null", 4); - return; - } - - // If number_float_t is an IEEE-754 single or double precision number, - // use the Grisu2 algorithm to produce short numbers which are - // guaranteed to round-trip, using strtof and strtod, resp. - // - // NB: The test below works if == . 
- static constexpr bool is_ieee_single_or_double - = (std::numeric_limits::is_iec559 && std::numeric_limits::digits == 24 && std::numeric_limits::max_exponent == 128) || - (std::numeric_limits::is_iec559 && std::numeric_limits::digits == 53 && std::numeric_limits::max_exponent == 1024); - - dump_float(x, std::integral_constant()); - } - - void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/) - { - char* begin = number_buffer.data(); - char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x); - - o->write_characters(begin, static_cast(end - begin)); - } - - void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/) - { - // get number of digits for a float -> text -> float round-trip - static constexpr auto d = std::numeric_limits::max_digits10; - - // the actual conversion - std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x); - - // negative value indicates an error - JSON_ASSERT(len > 0); - // check if buffer was large enough - JSON_ASSERT(static_cast(len) < number_buffer.size()); - - // erase thousands separator - if (thousands_sep != '\0') - { - const auto end = std::remove(number_buffer.begin(), - number_buffer.begin() + len, thousands_sep); - std::fill(end, number_buffer.end(), '\0'); - JSON_ASSERT((end - number_buffer.begin()) <= len); - len = (end - number_buffer.begin()); - } - - // convert decimal point to '.' - if (decimal_point != '\0' && decimal_point != '.') - { - const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point); - if (dec_pos != number_buffer.end()) - { - *dec_pos = '.'; - } - } - - o->write_characters(number_buffer.data(), static_cast(len)); - - // determine if need to append ".0" - const bool value_is_int_like = - std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1, - [](char c) - { - return c == '.' 
|| c == 'e'; - }); - - if (value_is_int_like) - { - o->write_characters(".0", 2); - } - } - - /*! - @brief check whether a string is UTF-8 encoded - - The function checks each byte of a string whether it is UTF-8 encoded. The - result of the check is stored in the @a state parameter. The function must - be called initially with state 0 (accept). State 1 means the string must - be rejected, because the current byte is not allowed. If the string is - completely processed, but the state is non-zero, the string ended - prematurely; that is, the last byte indicated more bytes should have - followed. - - @param[in,out] state the state of the decoding - @param[in,out] codep codepoint (valid only if resulting state is UTF8_ACCEPT) - @param[in] byte next byte to decode - @return new state - - @note The function has been edited: a std::array is used. - - @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann - @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ - */ - static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept - { - static const std::array utf8d = - { - { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF - 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF - 0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF - 0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 
0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF - 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 - 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 - 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 - 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8 - } - }; - - JSON_ASSERT(byte < utf8d.size()); - const std::uint8_t type = utf8d[byte]; - - codep = (state != UTF8_ACCEPT) - ? (byte & 0x3fu) | (codep << 6u) - : (0xFFu >> type) & (byte); - - std::size_t index = 256u + static_cast(state) * 16u + static_cast(type); - JSON_ASSERT(index < 400); - state = utf8d[index]; - return state; - } - - /* - * Overload to make the compiler happy while it is instantiating - * dump_integer for number_unsigned_t. - * Must never be called. - */ - number_unsigned_t remove_sign(number_unsigned_t x) - { - JSON_ASSERT(false); // LCOV_EXCL_LINE - return x; // LCOV_EXCL_LINE - } - - /* - * Helper function for dump_integer - * - * This function takes a negative signed integer and returns its absolute - * value as unsigned integer. The plus/minus shuffling is necessary as we can - * not directly remove the sign of an arbitrary signed integer as the - * absolute values of INT_MIN and INT_MAX are usually not the same. See - * #1708 for details. 
- */ - inline number_unsigned_t remove_sign(number_integer_t x) noexcept - { - JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); - return static_cast(-(x + 1)) + 1; - } - - private: - /// the output of the serializer - output_adapter_t o = nullptr; - - /// a (hopefully) large enough character buffer - std::array number_buffer{{}}; - - /// the locale - const std::lconv* loc = nullptr; - /// the locale's thousand separator character - const char thousands_sep = '\0'; - /// the locale's decimal point character - const char decimal_point = '\0'; - - /// string buffer - std::array string_buffer{{}}; - - /// the indentation character - const char indent_char; - /// the indentation string - string_t indent_string; - - /// error_handler how to react on decoding errors - const error_handler_t error_handler; -}; -} // namespace detail -} // namespace nlohmann - -// #include - -// #include - -// #include - - -#include // less -#include // allocator -#include // pair -#include // vector - -// #include - - -namespace nlohmann -{ - -/// ordered_map: a minimal map-like container that preserves insertion order -/// for use within nlohmann::basic_json -template , - class Allocator = std::allocator>> - struct ordered_map : std::vector, Allocator> -{ - using key_type = Key; - using mapped_type = T; - using Container = std::vector, Allocator>; - using typename Container::iterator; - using typename Container::const_iterator; - using typename Container::size_type; - using typename Container::value_type; - - // Explicit constructors instead of `using Container::Container` - // otherwise older compilers choke on it (GCC <= 5.5, xcode <= 9.4) - ordered_map(const Allocator& alloc = Allocator()) : Container{alloc} {} - template - ordered_map(It first, It last, const Allocator& alloc = Allocator()) - : Container{first, last, alloc} {} - ordered_map(std::initializer_list init, const Allocator& alloc = Allocator() ) - : Container{init, alloc} {} - - std::pair emplace(const key_type& key, 
T&& t) - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - return {it, false}; - } - } - Container::emplace_back(key, t); - return {--this->end(), true}; - } - - T& operator[](const Key& key) - { - return emplace(key, T{}).first->second; - } - - const T& operator[](const Key& key) const - { - return at(key); - } - - T& at(const Key& key) - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - return it->second; - } - } - - JSON_THROW(std::out_of_range("key not found")); - } - - const T& at(const Key& key) const - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - return it->second; - } - } - - JSON_THROW(std::out_of_range("key not found")); - } - - size_type erase(const Key& key) - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - // Since we cannot move const Keys, re-construct them in place - for (auto next = it; ++next != this->end(); ++it) - { - it->~value_type(); // Destroy but keep allocation - new (&*it) value_type{std::move(*next)}; - } - Container::pop_back(); - return 1; - } - } - return 0; - } - - iterator erase(iterator pos) - { - auto it = pos; - - // Since we cannot move const Keys, re-construct them in place - for (auto next = it; ++next != this->end(); ++it) - { - it->~value_type(); // Destroy but keep allocation - new (&*it) value_type{std::move(*next)}; - } - Container::pop_back(); - return pos; - } - - size_type count(const Key& key) const - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - return 1; - } - } - return 0; - } - - iterator find(const Key& key) - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - return it; - } - } - return Container::end(); - } - - const_iterator find(const Key& key) const - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == key) - { - 
return it; - } - } - return Container::end(); - } - - std::pair insert( value_type&& value ) - { - return emplace(value.first, std::move(value.second)); - } - - std::pair insert( const value_type& value ) - { - for (auto it = this->begin(); it != this->end(); ++it) - { - if (it->first == value.first) - { - return {it, false}; - } - } - Container::push_back(value); - return {--this->end(), true}; - } - - template - using require_input_iter = typename std::enable_if::iterator_category, - std::input_iterator_tag>::value>::type; - - template> - void insert(InputIt first, InputIt last) - { - for (auto it = first; it != last; ++it) - { - insert(*it); - } - } -}; - -} // namespace nlohmann - - -#if defined(JSON_HAS_CPP_17) - #include -#endif - -/*! -@brief namespace for Niels Lohmann -@see https://github.com/nlohmann -@since version 1.0.0 -*/ -namespace nlohmann -{ - -/*! -@brief a class to store JSON values - -@tparam ObjectType type for JSON objects (`std::map` by default; will be used -in @ref object_t) -@tparam ArrayType type for JSON arrays (`std::vector` by default; will be used -in @ref array_t) -@tparam StringType type for JSON strings and object keys (`std::string` by -default; will be used in @ref string_t) -@tparam BooleanType type for JSON booleans (`bool` by default; will be used -in @ref boolean_t) -@tparam NumberIntegerType type for JSON integer numbers (`int64_t` by -default; will be used in @ref number_integer_t) -@tparam NumberUnsignedType type for JSON unsigned integer numbers (@c -`uint64_t` by default; will be used in @ref number_unsigned_t) -@tparam NumberFloatType type for JSON floating-point numbers (`double` by -default; will be used in @ref number_float_t) -@tparam BinaryType type for packed binary data for compatibility with binary -serialization formats (`std::vector` by default; will be used in -@ref binary_t) -@tparam AllocatorType type of the allocator to use (`std::allocator` by -default) -@tparam JSONSerializer the serializer to resolve 
internal calls to `to_json()` -and `from_json()` (@ref adl_serializer by default) - -@requirement The class satisfies the following concept requirements: -- Basic - - [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible): - JSON values can be default constructed. The result will be a JSON null - value. - - [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible): - A JSON value can be constructed from an rvalue argument. - - [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible): - A JSON value can be copy-constructed from an lvalue expression. - - [MoveAssignable](https://en.cppreference.com/w/cpp/named_req/MoveAssignable): - A JSON value van be assigned from an rvalue argument. - - [CopyAssignable](https://en.cppreference.com/w/cpp/named_req/CopyAssignable): - A JSON value can be copy-assigned from an lvalue expression. - - [Destructible](https://en.cppreference.com/w/cpp/named_req/Destructible): - JSON values can be destructed. -- Layout - - [StandardLayoutType](https://en.cppreference.com/w/cpp/named_req/StandardLayoutType): - JSON values have - [standard layout](https://en.cppreference.com/w/cpp/language/data_members#Standard_layout): - All non-static data members are private and standard layout types, the - class has no virtual functions or (virtual) base classes. -- Library-wide - - [EqualityComparable](https://en.cppreference.com/w/cpp/named_req/EqualityComparable): - JSON values can be compared with `==`, see @ref - operator==(const_reference,const_reference). - - [LessThanComparable](https://en.cppreference.com/w/cpp/named_req/LessThanComparable): - JSON values can be compared with `<`, see @ref - operator<(const_reference,const_reference). - - [Swappable](https://en.cppreference.com/w/cpp/named_req/Swappable): - Any JSON lvalue or rvalue of can be swapped with any lvalue or rvalue of - other compatible types, using unqualified function call @ref swap(). 
- - [NullablePointer](https://en.cppreference.com/w/cpp/named_req/NullablePointer): - JSON values can be compared against `std::nullptr_t` objects which are used - to model the `null` value. -- Container - - [Container](https://en.cppreference.com/w/cpp/named_req/Container): - JSON values can be used like STL containers and provide iterator access. - - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer); - JSON values can be used like STL containers and provide reverse iterator - access. - -@invariant The member variables @a m_value and @a m_type have the following -relationship: -- If `m_type == value_t::object`, then `m_value.object != nullptr`. -- If `m_type == value_t::array`, then `m_value.array != nullptr`. -- If `m_type == value_t::string`, then `m_value.string != nullptr`. -The invariants are checked by member function assert_invariant(). - -@internal -@note ObjectType trick from https://stackoverflow.com/a/9860911 -@endinternal - -@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange -Format](http://rfc7159.net/rfc7159) - -@since version 1.0.0 - -@nosubgrouping -*/ -NLOHMANN_BASIC_JSON_TPL_DECLARATION -class basic_json -{ - private: - template friend struct detail::external_constructor; - friend ::nlohmann::json_pointer; - - template - friend class ::nlohmann::detail::parser; - friend ::nlohmann::detail::serializer; - template - friend class ::nlohmann::detail::iter_impl; - template - friend class ::nlohmann::detail::binary_writer; - template - friend class ::nlohmann::detail::binary_reader; - template - friend class ::nlohmann::detail::json_sax_dom_parser; - template - friend class ::nlohmann::detail::json_sax_dom_callback_parser; - friend class ::nlohmann::detail::exception; - - /// workaround type for MSVC - using basic_json_t = NLOHMANN_BASIC_JSON_TPL; - - JSON_PRIVATE_UNLESS_TESTED: - // convenience aliases for types residing in namespace detail; - using lexer = ::nlohmann::detail::lexer_base; - - 
template - static ::nlohmann::detail::parser parser( - InputAdapterType adapter, - detail::parser_callback_tcb = nullptr, - const bool allow_exceptions = true, - const bool ignore_comments = false - ) - { - return ::nlohmann::detail::parser(std::move(adapter), - std::move(cb), allow_exceptions, ignore_comments); - } - - private: - using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t; - template - using internal_iterator = ::nlohmann::detail::internal_iterator; - template - using iter_impl = ::nlohmann::detail::iter_impl; - template - using iteration_proxy = ::nlohmann::detail::iteration_proxy; - template using json_reverse_iterator = ::nlohmann::detail::json_reverse_iterator; - - template - using output_adapter_t = ::nlohmann::detail::output_adapter_t; - - template - using binary_reader = ::nlohmann::detail::binary_reader; - template using binary_writer = ::nlohmann::detail::binary_writer; - - JSON_PRIVATE_UNLESS_TESTED: - using serializer = ::nlohmann::detail::serializer; - - public: - using value_t = detail::value_t; - /// JSON Pointer, see @ref nlohmann::json_pointer - using json_pointer = ::nlohmann::json_pointer; - template - using json_serializer = JSONSerializer; - /// how to treat decoding errors - using error_handler_t = detail::error_handler_t; - /// how to treat CBOR tags - using cbor_tag_handler_t = detail::cbor_tag_handler_t; - /// helper type for initializer lists of basic_json values - using initializer_list_t = std::initializer_list>; - - using input_format_t = detail::input_format_t; - /// SAX interface type, see @ref nlohmann::json_sax - using json_sax_t = json_sax; - - //////////////// - // exceptions // - //////////////// - - /// @name exceptions - /// Classes to implement user-defined exceptions. 
- /// @{ - - /// @copydoc detail::exception - using exception = detail::exception; - /// @copydoc detail::parse_error - using parse_error = detail::parse_error; - /// @copydoc detail::invalid_iterator - using invalid_iterator = detail::invalid_iterator; - /// @copydoc detail::type_error - using type_error = detail::type_error; - /// @copydoc detail::out_of_range - using out_of_range = detail::out_of_range; - /// @copydoc detail::other_error - using other_error = detail::other_error; - - /// @} - - - ///////////////////// - // container types // - ///////////////////// - - /// @name container types - /// The canonic container types to use @ref basic_json like any other STL - /// container. - /// @{ - - /// the type of elements in a basic_json container - using value_type = basic_json; - - /// the type of an element reference - using reference = value_type&; - /// the type of an element const reference - using const_reference = const value_type&; - - /// a type to represent differences between iterators - using difference_type = std::ptrdiff_t; - /// a type to represent container sizes - using size_type = std::size_t; - - /// the allocator type - using allocator_type = AllocatorType; - - /// the type of an element pointer - using pointer = typename std::allocator_traits::pointer; - /// the type of an element const pointer - using const_pointer = typename std::allocator_traits::const_pointer; - - /// an iterator for a basic_json container - using iterator = iter_impl; - /// a const iterator for a basic_json container - using const_iterator = iter_impl; - /// a reverse iterator for a basic_json container - using reverse_iterator = json_reverse_iterator; - /// a const reverse iterator for a basic_json container - using const_reverse_iterator = json_reverse_iterator; - - /// @} - - - /*! - @brief returns the allocator associated with the container - */ - static allocator_type get_allocator() - { - return allocator_type(); - } - - /*! 
- @brief returns version information on the library - - This function returns a JSON object with information about the library, - including the version number and information on the platform and compiler. - - @return JSON object holding version information - key | description - ----------- | --------------- - `compiler` | Information on the used compiler. It is an object with the following keys: `c++` (the used C++ standard), `family` (the compiler family; possible values are `clang`, `icc`, `gcc`, `ilecpp`, `msvc`, `pgcpp`, `sunpro`, and `unknown`), and `version` (the compiler version). - `copyright` | The copyright line for the library as string. - `name` | The name of the library as string. - `platform` | The used platform as string. Possible values are `win32`, `linux`, `apple`, `unix`, and `unknown`. - `url` | The URL of the project as string. - `version` | The version of the library. It is an object with the following keys: `major`, `minor`, and `patch` as defined by [Semantic Versioning](http://semver.org), and `string` (the version string). - - @liveexample{The following code shows an example output of the `meta()` - function.,meta} - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @complexity Constant. - - @since 2.1.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json meta() - { - basic_json result; - - result["copyright"] = "(C) 2013-2021 Niels Lohmann"; - result["name"] = "JSON for Modern C++"; - result["url"] = "https://github.com/nlohmann/json"; - result["version"]["string"] = - std::to_string(NLOHMANN_JSON_VERSION_MAJOR) + "." + - std::to_string(NLOHMANN_JSON_VERSION_MINOR) + "." 
+ - std::to_string(NLOHMANN_JSON_VERSION_PATCH); - result["version"]["major"] = NLOHMANN_JSON_VERSION_MAJOR; - result["version"]["minor"] = NLOHMANN_JSON_VERSION_MINOR; - result["version"]["patch"] = NLOHMANN_JSON_VERSION_PATCH; - -#ifdef _WIN32 - result["platform"] = "win32"; -#elif defined __linux__ - result["platform"] = "linux"; -#elif defined __APPLE__ - result["platform"] = "apple"; -#elif defined __unix__ - result["platform"] = "unix"; -#else - result["platform"] = "unknown"; -#endif - -#if defined(__ICC) || defined(__INTEL_COMPILER) - result["compiler"] = {{"family", "icc"}, {"version", __INTEL_COMPILER}}; -#elif defined(__clang__) - result["compiler"] = {{"family", "clang"}, {"version", __clang_version__}}; -#elif defined(__GNUC__) || defined(__GNUG__) - result["compiler"] = {{"family", "gcc"}, {"version", std::to_string(__GNUC__) + "." + std::to_string(__GNUC_MINOR__) + "." + std::to_string(__GNUC_PATCHLEVEL__)}}; -#elif defined(__HP_cc) || defined(__HP_aCC) - result["compiler"] = "hp" -#elif defined(__IBMCPP__) - result["compiler"] = {{"family", "ilecpp"}, {"version", __IBMCPP__}}; -#elif defined(_MSC_VER) - result["compiler"] = {{"family", "msvc"}, {"version", _MSC_VER}}; -#elif defined(__PGI) - result["compiler"] = {{"family", "pgcpp"}, {"version", __PGI}}; -#elif defined(__SUNPRO_CC) - result["compiler"] = {{"family", "sunpro"}, {"version", __SUNPRO_CC}}; -#else - result["compiler"] = {{"family", "unknown"}, {"version", "unknown"}}; -#endif - -#ifdef __cplusplus - result["compiler"]["c++"] = std::to_string(__cplusplus); -#else - result["compiler"]["c++"] = "unknown"; -#endif - return result; - } - - - /////////////////////////// - // JSON value data types // - /////////////////////////// - - /// @name JSON value data types - /// The data types to store a JSON value. These types are derived from - /// the template arguments passed to class @ref basic_json. 
- /// @{ - -#if defined(JSON_HAS_CPP_14) - // Use transparent comparator if possible, combined with perfect forwarding - // on find() and count() calls prevents unnecessary string construction. - using object_comparator_t = std::less<>; -#else - using object_comparator_t = std::less; -#endif - - /*! - @brief a type for an object - - [RFC 7159](http://rfc7159.net/rfc7159) describes JSON objects as follows: - > An object is an unordered collection of zero or more name/value pairs, - > where a name is a string and a value is a string, number, boolean, null, - > object, or array. - - To store objects in C++, a type is defined by the template parameters - described below. - - @tparam ObjectType the container to store objects (e.g., `std::map` or - `std::unordered_map`) - @tparam StringType the type of the keys or names (e.g., `std::string`). - The comparison function `std::less` is used to order elements - inside the container. - @tparam AllocatorType the allocator to use for objects (e.g., - `std::allocator`) - - #### Default type - - With the default values for @a ObjectType (`std::map`), @a StringType - (`std::string`), and @a AllocatorType (`std::allocator`), the default - value for @a object_t is: - - @code {.cpp} - std::map< - std::string, // key_type - basic_json, // value_type - std::less, // key_compare - std::allocator> // allocator_type - > - @endcode - - #### Behavior - - The choice of @a object_t influences the behavior of the JSON class. With - the default type, objects have the following behavior: - - - When all names are unique, objects will be interoperable in the sense - that all software implementations receiving that object will agree on - the name-value mappings. - - When the names within an object are not unique, it is unspecified which - one of the values for a given key will be chosen. For instance, - `{"key": 2, "key": 1}` could be equal to either `{"key": 1}` or - `{"key": 2}`. 
- - Internally, name/value pairs are stored in lexicographical order of the - names. Objects will also be serialized (see @ref dump) in this order. - For instance, `{"b": 1, "a": 2}` and `{"a": 2, "b": 1}` will be stored - and serialized as `{"a": 2, "b": 1}`. - - When comparing objects, the order of the name/value pairs is irrelevant. - This makes objects interoperable in the sense that they will not be - affected by these differences. For instance, `{"b": 1, "a": 2}` and - `{"a": 2, "b": 1}` will be treated as equal. - - #### Limits - - [RFC 7159](http://rfc7159.net/rfc7159) specifies: - > An implementation may set limits on the maximum depth of nesting. - - In this class, the object's limit of nesting is not explicitly constrained. - However, a maximum depth of nesting may be introduced by the compiler or - runtime environment. A theoretical limit can be queried by calling the - @ref max_size function of a JSON object. - - #### Storage - - Objects are stored as pointers in a @ref basic_json type. That is, for any - access to object values, a pointer of type `object_t*` must be - dereferenced. - - @sa @ref array_t -- type for an array value - - @since version 1.0.0 - - @note The order name/value pairs are added to the object is *not* - preserved by the library. Therefore, iterating an object may return - name/value pairs in a different order than they were originally stored. In - fact, keys will be traversed in alphabetical order as `std::map` with - `std::less` is used by default. Please note this behavior conforms to [RFC - 7159](http://rfc7159.net/rfc7159), because any order implements the - specified "unordered" nature of JSON objects. - */ - using object_t = ObjectType>>; - - /*! - @brief a type for an array - - [RFC 7159](http://rfc7159.net/rfc7159) describes JSON arrays as follows: - > An array is an ordered sequence of zero or more values. - - To store objects in C++, a type is defined by the template parameters - explained below. 
- - @tparam ArrayType container type to store arrays (e.g., `std::vector` or - `std::list`) - @tparam AllocatorType allocator to use for arrays (e.g., `std::allocator`) - - #### Default type - - With the default values for @a ArrayType (`std::vector`) and @a - AllocatorType (`std::allocator`), the default value for @a array_t is: - - @code {.cpp} - std::vector< - basic_json, // value_type - std::allocator // allocator_type - > - @endcode - - #### Limits - - [RFC 7159](http://rfc7159.net/rfc7159) specifies: - > An implementation may set limits on the maximum depth of nesting. - - In this class, the array's limit of nesting is not explicitly constrained. - However, a maximum depth of nesting may be introduced by the compiler or - runtime environment. A theoretical limit can be queried by calling the - @ref max_size function of a JSON array. - - #### Storage - - Arrays are stored as pointers in a @ref basic_json type. That is, for any - access to array values, a pointer of type `array_t*` must be dereferenced. - - @sa @ref object_t -- type for an object value - - @since version 1.0.0 - */ - using array_t = ArrayType>; - - /*! - @brief a type for a string - - [RFC 7159](http://rfc7159.net/rfc7159) describes JSON strings as follows: - > A string is a sequence of zero or more Unicode characters. - - To store objects in C++, a type is defined by the template parameter - described below. Unicode values are split by the JSON class into - byte-sized characters during deserialization. - - @tparam StringType the container to store strings (e.g., `std::string`). - Note this container is used for keys/names in objects, see @ref object_t. - - #### Default type - - With the default values for @a StringType (`std::string`), the default - value for @a string_t is: - - @code {.cpp} - std::string - @endcode - - #### Encoding - - Strings are stored in UTF-8 encoding. 
Therefore, functions like - `std::string::size()` or `std::string::length()` return the number of - bytes in the string rather than the number of characters or glyphs. - - #### String comparison - - [RFC 7159](http://rfc7159.net/rfc7159) states: - > Software implementations are typically required to test names of object - > members for equality. Implementations that transform the textual - > representation into sequences of Unicode code units and then perform the - > comparison numerically, code unit by code unit, are interoperable in the - > sense that implementations will agree in all cases on equality or - > inequality of two strings. For example, implementations that compare - > strings with escaped characters unconverted may incorrectly find that - > `"a\\b"` and `"a\u005Cb"` are not equal. - - This implementation is interoperable as it does compare strings code unit - by code unit. - - #### Storage - - String values are stored as pointers in a @ref basic_json type. That is, - for any access to string values, a pointer of type `string_t*` must be - dereferenced. - - @since version 1.0.0 - */ - using string_t = StringType; - - /*! - @brief a type for a boolean - - [RFC 7159](http://rfc7159.net/rfc7159) implicitly describes a boolean as a - type which differentiates the two literals `true` and `false`. - - To store objects in C++, a type is defined by the template parameter @a - BooleanType which chooses the type to use. - - #### Default type - - With the default values for @a BooleanType (`bool`), the default value for - @a boolean_t is: - - @code {.cpp} - bool - @endcode - - #### Storage - - Boolean values are stored directly inside a @ref basic_json type. - - @since version 1.0.0 - */ - using boolean_t = BooleanType; - - /*! - @brief a type for a number (integer) - - [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: - > The representation of numbers is similar to that used in most - > programming languages. 
A number is represented in base 10 using decimal - > digits. It contains an integer component that may be prefixed with an - > optional minus sign, which may be followed by a fraction part and/or an - > exponent part. Leading zeros are not allowed. (...) Numeric values that - > cannot be represented in the grammar below (such as Infinity and NaN) - > are not permitted. - - This description includes both integer and floating-point numbers. - However, C++ allows more precise storage if it is known whether the number - is a signed integer, an unsigned integer or a floating-point number. - Therefore, three different types, @ref number_integer_t, @ref - number_unsigned_t and @ref number_float_t are used. - - To store integer numbers in C++, a type is defined by the template - parameter @a NumberIntegerType which chooses the type to use. - - #### Default type - - With the default values for @a NumberIntegerType (`int64_t`), the default - value for @a number_integer_t is: - - @code {.cpp} - int64_t - @endcode - - #### Default behavior - - - The restrictions about leading zeros is not enforced in C++. Instead, - leading zeros in integer literals lead to an interpretation as octal - number. Internally, the value will be stored as decimal number. For - instance, the C++ integer literal `010` will be serialized to `8`. - During deserialization, leading zeros yield an error. - - Not-a-number (NaN) values will be serialized to `null`. - - #### Limits - - [RFC 7159](http://rfc7159.net/rfc7159) specifies: - > An implementation may set limits on the range and precision of numbers. - - When the default type is used, the maximal integer number that can be - stored is `9223372036854775807` (INT64_MAX) and the minimal integer number - that can be stored is `-9223372036854775808` (INT64_MIN). Integer numbers - that are out of range will yield over/underflow when used in a - constructor. 
During deserialization, too large or small integer numbers - will be automatically be stored as @ref number_unsigned_t or @ref - number_float_t. - - [RFC 7159](http://rfc7159.net/rfc7159) further states: - > Note that when such software is used, numbers that are integers and are - > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense - > that implementations will agree exactly on their numeric values. - - As this range is a subrange of the exactly supported range [INT64_MIN, - INT64_MAX], this class's integer type is interoperable. - - #### Storage - - Integer number values are stored directly inside a @ref basic_json type. - - @sa @ref number_float_t -- type for number values (floating-point) - - @sa @ref number_unsigned_t -- type for number values (unsigned integer) - - @since version 1.0.0 - */ - using number_integer_t = NumberIntegerType; - - /*! - @brief a type for a number (unsigned) - - [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: - > The representation of numbers is similar to that used in most - > programming languages. A number is represented in base 10 using decimal - > digits. It contains an integer component that may be prefixed with an - > optional minus sign, which may be followed by a fraction part and/or an - > exponent part. Leading zeros are not allowed. (...) Numeric values that - > cannot be represented in the grammar below (such as Infinity and NaN) - > are not permitted. - - This description includes both integer and floating-point numbers. - However, C++ allows more precise storage if it is known whether the number - is a signed integer, an unsigned integer or a floating-point number. - Therefore, three different types, @ref number_integer_t, @ref - number_unsigned_t and @ref number_float_t are used. - - To store unsigned integer numbers in C++, a type is defined by the - template parameter @a NumberUnsignedType which chooses the type to use. 
- - #### Default type - - With the default values for @a NumberUnsignedType (`uint64_t`), the - default value for @a number_unsigned_t is: - - @code {.cpp} - uint64_t - @endcode - - #### Default behavior - - - The restrictions about leading zeros is not enforced in C++. Instead, - leading zeros in integer literals lead to an interpretation as octal - number. Internally, the value will be stored as decimal number. For - instance, the C++ integer literal `010` will be serialized to `8`. - During deserialization, leading zeros yield an error. - - Not-a-number (NaN) values will be serialized to `null`. - - #### Limits - - [RFC 7159](http://rfc7159.net/rfc7159) specifies: - > An implementation may set limits on the range and precision of numbers. - - When the default type is used, the maximal integer number that can be - stored is `18446744073709551615` (UINT64_MAX) and the minimal integer - number that can be stored is `0`. Integer numbers that are out of range - will yield over/underflow when used in a constructor. During - deserialization, too large or small integer numbers will be automatically - be stored as @ref number_integer_t or @ref number_float_t. - - [RFC 7159](http://rfc7159.net/rfc7159) further states: - > Note that when such software is used, numbers that are integers and are - > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense - > that implementations will agree exactly on their numeric values. - - As this range is a subrange (when considered in conjunction with the - number_integer_t type) of the exactly supported range [0, UINT64_MAX], - this class's integer type is interoperable. - - #### Storage - - Integer number values are stored directly inside a @ref basic_json type. - - @sa @ref number_float_t -- type for number values (floating-point) - @sa @ref number_integer_t -- type for number values (integer) - - @since version 2.0.0 - */ - using number_unsigned_t = NumberUnsignedType; - - /*! 
- @brief a type for a number (floating-point) - - [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows: - > The representation of numbers is similar to that used in most - > programming languages. A number is represented in base 10 using decimal - > digits. It contains an integer component that may be prefixed with an - > optional minus sign, which may be followed by a fraction part and/or an - > exponent part. Leading zeros are not allowed. (...) Numeric values that - > cannot be represented in the grammar below (such as Infinity and NaN) - > are not permitted. - - This description includes both integer and floating-point numbers. - However, C++ allows more precise storage if it is known whether the number - is a signed integer, an unsigned integer or a floating-point number. - Therefore, three different types, @ref number_integer_t, @ref - number_unsigned_t and @ref number_float_t are used. - - To store floating-point numbers in C++, a type is defined by the template - parameter @a NumberFloatType which chooses the type to use. - - #### Default type - - With the default values for @a NumberFloatType (`double`), the default - value for @a number_float_t is: - - @code {.cpp} - double - @endcode - - #### Default behavior - - - The restrictions about leading zeros is not enforced in C++. Instead, - leading zeros in floating-point literals will be ignored. Internally, - the value will be stored as decimal number. For instance, the C++ - floating-point literal `01.2` will be serialized to `1.2`. During - deserialization, leading zeros yield an error. - - Not-a-number (NaN) values will be serialized to `null`. - - #### Limits - - [RFC 7159](http://rfc7159.net/rfc7159) states: - > This specification allows implementations to set limits on the range and - > precision of numbers accepted. 
Since software that implements IEEE - > 754-2008 binary64 (double precision) numbers is generally available and - > widely used, good interoperability can be achieved by implementations - > that expect no more precision or range than these provide, in the sense - > that implementations will approximate JSON numbers within the expected - > precision. - - This implementation does exactly follow this approach, as it uses double - precision floating-point numbers. Note values smaller than - `-1.79769313486232e+308` and values greater than `1.79769313486232e+308` - will be stored as NaN internally and be serialized to `null`. - - #### Storage - - Floating-point number values are stored directly inside a @ref basic_json - type. - - @sa @ref number_integer_t -- type for number values (integer) - - @sa @ref number_unsigned_t -- type for number values (unsigned integer) - - @since version 1.0.0 - */ - using number_float_t = NumberFloatType; - - /*! - @brief a type for a packed binary type - - This type is a type designed to carry binary data that appears in various - serialized formats, such as CBOR's Major Type 2, MessagePack's bin, and - BSON's generic binary subtype. This type is NOT a part of standard JSON and - exists solely for compatibility with these binary types. As such, it is - simply defined as an ordered sequence of zero or more byte values. - - Additionally, as an implementation detail, the subtype of the binary data is - carried around as a `std::uint8_t`, which is compatible with both of the - binary data formats that use binary subtyping, (though the specific - numbering is incompatible with each other, and it is up to the user to - translate between them). - - [CBOR's RFC 7049](https://tools.ietf.org/html/rfc7049) describes this type - as: - > Major type 2: a byte string. The string's length in bytes is represented - > following the rules for positive integers (major type 0). 
- - [MessagePack's documentation on the bin type - family](https://github.com/msgpack/msgpack/blob/master/spec.md#bin-format-family) - describes this type as: - > Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes - > in addition to the size of the byte array. - - [BSON's specifications](http://bsonspec.org/spec.html) describe several - binary types; however, this type is intended to represent the generic binary - type which has the description: - > Generic binary subtype - This is the most commonly used binary subtype and - > should be the 'default' for drivers and tools. - - None of these impose any limitations on the internal representation other - than the basic unit of storage be some type of array whose parts are - decomposable into bytes. - - The default representation of this binary format is a - `std::vector`, which is a very common way to represent a byte - array in modern C++. - - #### Default type - - The default values for @a BinaryType is `std::vector` - - #### Storage - - Binary Arrays are stored as pointers in a @ref basic_json type. That is, - for any access to array values, a pointer of the type `binary_t*` must be - dereferenced. - - #### Notes on subtypes - - - CBOR - - Binary values are represented as byte strings. No subtypes are - supported and will be ignored when CBOR is written. - - MessagePack - - If a subtype is given and the binary array contains exactly 1, 2, 4, 8, - or 16 elements, the fixext family (fixext1, fixext2, fixext4, fixext8) - is used. For other sizes, the ext family (ext8, ext16, ext32) is used. - The subtype is then added as singed 8-bit integer. - - If no subtype is given, the bin family (bin8, bin16, bin32) is used. - - BSON - - If a subtype is given, it is used and added as unsigned 8-bit integer. - - If no subtype is given, the generic binary subtype 0x00 is used. 
- - @sa @ref binary -- create a binary array - - @since version 3.8.0 - */ - using binary_t = nlohmann::byte_container_with_subtype; - /// @} - - private: - - /// helper for exception-safe object creation - template - JSON_HEDLEY_RETURNS_NON_NULL - static T* create(Args&& ... args) - { - AllocatorType alloc; - using AllocatorTraits = std::allocator_traits>; - - auto deleter = [&](T * obj) - { - AllocatorTraits::deallocate(alloc, obj, 1); - }; - std::unique_ptr obj(AllocatorTraits::allocate(alloc, 1), deleter); - AllocatorTraits::construct(alloc, obj.get(), std::forward(args)...); - JSON_ASSERT(obj != nullptr); - return obj.release(); - } - - //////////////////////// - // JSON value storage // - //////////////////////// - - JSON_PRIVATE_UNLESS_TESTED: - /*! - @brief a JSON value - - The actual storage for a JSON value of the @ref basic_json class. This - union combines the different storage types for the JSON value types - defined in @ref value_t. - - JSON type | value_t type | used type - --------- | --------------- | ------------------------ - object | object | pointer to @ref object_t - array | array | pointer to @ref array_t - string | string | pointer to @ref string_t - boolean | boolean | @ref boolean_t - number | number_integer | @ref number_integer_t - number | number_unsigned | @ref number_unsigned_t - number | number_float | @ref number_float_t - binary | binary | pointer to @ref binary_t - null | null | *no value is stored* - - @note Variable-length types (objects, arrays, and strings) are stored as - pointers. The size of the union should not exceed 64 bits if the default - value types are used. 
- - @since version 1.0.0 - */ - union json_value - { - /// object (stored with pointer to save storage) - object_t* object; - /// array (stored with pointer to save storage) - array_t* array; - /// string (stored with pointer to save storage) - string_t* string; - /// binary (stored with pointer to save storage) - binary_t* binary; - /// boolean - boolean_t boolean; - /// number (integer) - number_integer_t number_integer; - /// number (unsigned integer) - number_unsigned_t number_unsigned; - /// number (floating-point) - number_float_t number_float; - - /// default constructor (for null values) - json_value() = default; - /// constructor for booleans - json_value(boolean_t v) noexcept : boolean(v) {} - /// constructor for numbers (integer) - json_value(number_integer_t v) noexcept : number_integer(v) {} - /// constructor for numbers (unsigned) - json_value(number_unsigned_t v) noexcept : number_unsigned(v) {} - /// constructor for numbers (floating-point) - json_value(number_float_t v) noexcept : number_float(v) {} - /// constructor for empty values of a given type - json_value(value_t t) - { - switch (t) - { - case value_t::object: - { - object = create(); - break; - } - - case value_t::array: - { - array = create(); - break; - } - - case value_t::string: - { - string = create(""); - break; - } - - case value_t::binary: - { - binary = create(); - break; - } - - case value_t::boolean: - { - boolean = boolean_t(false); - break; - } - - case value_t::number_integer: - { - number_integer = number_integer_t(0); - break; - } - - case value_t::number_unsigned: - { - number_unsigned = number_unsigned_t(0); - break; - } - - case value_t::number_float: - { - number_float = number_float_t(0.0); - break; - } - - case value_t::null: - { - object = nullptr; // silence warning, see #821 - break; - } - - default: - { - object = nullptr; // silence warning, see #821 - if (JSON_HEDLEY_UNLIKELY(t == value_t::null)) - { - JSON_THROW(other_error::create(500, 
"961c151d2e87f2686a955a9be24d316f1362bf21 3.9.1", basic_json())); // LCOV_EXCL_LINE - } - break; - } - } - } - - /// constructor for strings - json_value(const string_t& value) - { - string = create(value); - } - - /// constructor for rvalue strings - json_value(string_t&& value) - { - string = create(std::move(value)); - } - - /// constructor for objects - json_value(const object_t& value) - { - object = create(value); - } - - /// constructor for rvalue objects - json_value(object_t&& value) - { - object = create(std::move(value)); - } - - /// constructor for arrays - json_value(const array_t& value) - { - array = create(value); - } - - /// constructor for rvalue arrays - json_value(array_t&& value) - { - array = create(std::move(value)); - } - - /// constructor for binary arrays - json_value(const typename binary_t::container_type& value) - { - binary = create(value); - } - - /// constructor for rvalue binary arrays - json_value(typename binary_t::container_type&& value) - { - binary = create(std::move(value)); - } - - /// constructor for binary arrays (internal type) - json_value(const binary_t& value) - { - binary = create(value); - } - - /// constructor for rvalue binary arrays (internal type) - json_value(binary_t&& value) - { - binary = create(std::move(value)); - } - - void destroy(value_t t) noexcept - { - // flatten the current json_value to a heap-allocated stack - std::vector stack; - - // move the top-level items to stack - if (t == value_t::array) - { - stack.reserve(array->size()); - std::move(array->begin(), array->end(), std::back_inserter(stack)); - } - else if (t == value_t::object) - { - stack.reserve(object->size()); - for (auto&& it : *object) - { - stack.push_back(std::move(it.second)); - } - } - - while (!stack.empty()) - { - // move the last item to local variable to be processed - basic_json current_item(std::move(stack.back())); - stack.pop_back(); - - // if current_item is array/object, move - // its children to the stack to be processed 
later - if (current_item.is_array()) - { - std::move(current_item.m_value.array->begin(), current_item.m_value.array->end(), - std::back_inserter(stack)); - - current_item.m_value.array->clear(); - } - else if (current_item.is_object()) - { - for (auto&& it : *current_item.m_value.object) - { - stack.push_back(std::move(it.second)); - } - - current_item.m_value.object->clear(); - } - - // it's now safe that current_item get destructed - // since it doesn't have any children - } - - switch (t) - { - case value_t::object: - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, object); - std::allocator_traits::deallocate(alloc, object, 1); - break; - } - - case value_t::array: - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, array); - std::allocator_traits::deallocate(alloc, array, 1); - break; - } - - case value_t::string: - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, string); - std::allocator_traits::deallocate(alloc, string, 1); - break; - } - - case value_t::binary: - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, binary); - std::allocator_traits::deallocate(alloc, binary, 1); - break; - } - - default: - { - break; - } - } - } - }; - - private: - /*! - @brief checks the class invariants - - This function asserts the class invariants. It needs to be called at the - end of every constructor to make sure that created objects respect the - invariant. Furthermore, it has to be called each time the type of a JSON - value is changed, because the invariant expresses a relationship between - @a m_type and @a m_value. - - Furthermore, the parent relation is checked for arrays and objects: If - @a check_parents true and the value is an array or object, then the - container's elements must have the current value as parent. - - @param[in] check_parents whether the parent relation should be checked. 
- The value is true by default and should only be set to false - during destruction of objects when the invariant does not - need to hold. - */ - void assert_invariant(bool check_parents = true) const noexcept - { - JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr); - JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr); - JSON_ASSERT(m_type != value_t::string || m_value.string != nullptr); - JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr); - -#if JSON_DIAGNOSTICS - JSON_ASSERT(!check_parents || !is_structured() || std::all_of(begin(), end(), [this](const basic_json & j) - { - return j.m_parent == this; - })); -#else - static_cast(check_parents); -#endif - } - - void set_parents() - { -#if JSON_DIAGNOSTICS - switch (m_type) - { - case value_t::array: - { - for (auto& element : *m_value.array) - { - element.m_parent = this; - } - break; - } - - case value_t::object: - { - for (auto& element : *m_value.object) - { - element.second.m_parent = this; - } - break; - } - - default: - break; - } -#endif - } - - iterator set_parents(iterator it, typename iterator::difference_type count) - { -#if JSON_DIAGNOSTICS - for (typename iterator::difference_type i = 0; i < count; ++i) - { - (it + i)->m_parent = this; - } -#else - static_cast(count); -#endif - return it; - } - - reference set_parent(reference j) - { -#if JSON_DIAGNOSTICS - j.m_parent = this; -#else - static_cast(j); -#endif - return j; - } - - public: - ////////////////////////// - // JSON parser callback // - ////////////////////////// - - /*! 
- @brief parser event types - - The parser callback distinguishes the following events: - - `object_start`: the parser read `{` and started to process a JSON object - - `key`: the parser read a key of a value in an object - - `object_end`: the parser read `}` and finished processing a JSON object - - `array_start`: the parser read `[` and started to process a JSON array - - `array_end`: the parser read `]` and finished processing a JSON array - - `value`: the parser finished reading a JSON value - - @image html callback_events.png "Example when certain parse events are triggered" - - @sa @ref parser_callback_t for more information and examples - */ - using parse_event_t = detail::parse_event_t; - - /*! - @brief per-element parser callback type - - With a parser callback function, the result of parsing a JSON text can be - influenced. When passed to @ref parse, it is called on certain events - (passed as @ref parse_event_t via parameter @a event) with a set recursion - depth @a depth and context JSON value @a parsed. The return value of the - callback function is a boolean indicating whether the element that emitted - the callback shall be kept or not. - - We distinguish six scenarios (determined by the event type) in which the - callback function can be called. The following table describes the values - of the parameters @a depth, @a event, and @a parsed. 
- - parameter @a event | description | parameter @a depth | parameter @a parsed - ------------------ | ----------- | ------------------ | ------------------- - parse_event_t::object_start | the parser read `{` and started to process a JSON object | depth of the parent of the JSON object | a JSON value with type discarded - parse_event_t::key | the parser read a key of a value in an object | depth of the currently parsed JSON object | a JSON string containing the key - parse_event_t::object_end | the parser read `}` and finished processing a JSON object | depth of the parent of the JSON object | the parsed JSON object - parse_event_t::array_start | the parser read `[` and started to process a JSON array | depth of the parent of the JSON array | a JSON value with type discarded - parse_event_t::array_end | the parser read `]` and finished processing a JSON array | depth of the parent of the JSON array | the parsed JSON array - parse_event_t::value | the parser finished reading a JSON value | depth of the value | the parsed JSON value - - @image html callback_events.png "Example when certain parse events are triggered" - - Discarding a value (i.e., returning `false`) has different effects - depending on the context in which function was called: - - - Discarded values in structured types are skipped. That is, the parser - will behave as if the discarded value was never read. - - In case a value outside a structured type is skipped, it is replaced - with `null`. This case happens if the top-level element is skipped. - - @param[in] depth the depth of the recursion during parsing - - @param[in] event an event of type parse_event_t indicating the context in - the callback function has been called - - @param[in,out] parsed the current intermediate parse result; note that - writing to this value has no effect for parse_event_t::key events - - @return Whether the JSON value which called the function during parsing - should be kept (`true`) or not (`false`). 
In the latter case, it is either - skipped completely or replaced by an empty discarded object. - - @sa @ref parse for examples - - @since version 1.0.0 - */ - using parser_callback_t = detail::parser_callback_t; - - ////////////////// - // constructors // - ////////////////// - - /// @name constructors and destructors - /// Constructors of class @ref basic_json, copy/move constructor, copy - /// assignment, static functions creating objects, and the destructor. - /// @{ - - /*! - @brief create an empty value with a given type - - Create an empty JSON value with a given type. The value will be default - initialized with an empty value which depends on the type: - - Value type | initial value - ----------- | ------------- - null | `null` - boolean | `false` - string | `""` - number | `0` - object | `{}` - array | `[]` - binary | empty array - - @param[in] v the type of the value to create - - @complexity Constant. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @liveexample{The following code shows the constructor for different @ref - value_t values,basic_json__value_t} - - @sa @ref clear() -- restores the postcondition of this constructor - - @since version 1.0.0 - */ - basic_json(const value_t v) - : m_type(v), m_value(v) - { - assert_invariant(); - } - - /*! - @brief create a null object - - Create a `null` JSON value. It either takes a null pointer as parameter - (explicitly creating `null`) or no parameter (implicitly creating `null`). - The passed null pointer itself is not read -- it is only used to choose - the right constructor. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this constructor never throws - exceptions. 
- - @liveexample{The following code shows the constructor with and without a - null pointer parameter.,basic_json__nullptr_t} - - @since version 1.0.0 - */ - basic_json(std::nullptr_t = nullptr) noexcept - : basic_json(value_t::null) - { - assert_invariant(); - } - - /*! - @brief create a JSON value - - This is a "catch all" constructor for all compatible JSON types; that is, - types for which a `to_json()` method exists. The constructor forwards the - parameter @a val to that method (to `json_serializer::to_json` method - with `U = uncvref_t`, to be exact). - - Template type @a CompatibleType includes, but is not limited to, the - following types: - - **arrays**: @ref array_t and all kinds of compatible containers such as - `std::vector`, `std::deque`, `std::list`, `std::forward_list`, - `std::array`, `std::valarray`, `std::set`, `std::unordered_set`, - `std::multiset`, and `std::unordered_multiset` with a `value_type` from - which a @ref basic_json value can be constructed. - - **objects**: @ref object_t and all kinds of compatible associative - containers such as `std::map`, `std::unordered_map`, `std::multimap`, - and `std::unordered_multimap` with a `key_type` compatible to - @ref string_t and a `value_type` from which a @ref basic_json value can - be constructed. - - **strings**: @ref string_t, string literals, and all compatible string - containers can be used. - - **numbers**: @ref number_integer_t, @ref number_unsigned_t, - @ref number_float_t, and all convertible number types such as `int`, - `size_t`, `int64_t`, `float` or `double` can be used. - - **boolean**: @ref boolean_t / `bool` can be used. - - **binary**: @ref binary_t / `std::vector` may be used, - unfortunately because string literals cannot be distinguished from binary - character arrays by the C++ type system, all types compatible with `const - char*` will be directed to the string constructor instead. 
This is both - for backwards compatibility, and due to the fact that a binary type is not - a standard JSON type. - - See the examples below. - - @tparam CompatibleType a type such that: - - @a CompatibleType is not derived from `std::istream`, - - @a CompatibleType is not @ref basic_json (to avoid hijacking copy/move - constructors), - - @a CompatibleType is not a different @ref basic_json type (i.e. with different template arguments) - - @a CompatibleType is not a @ref basic_json nested type (e.g., - @ref json_pointer, @ref iterator, etc ...) - - @ref @ref json_serializer has a - `to_json(basic_json_t&, CompatibleType&&)` method - - @tparam U = `uncvref_t` - - @param[in] val the value to be forwarded to the respective constructor - - @complexity Usually linear in the size of the passed @a val, also - depending on the implementation of the called `to_json()` - method. - - @exceptionsafety Depends on the called constructor. For types directly - supported by the library (i.e., all types for which no `to_json()` function - was provided), strong guarantee holds: if an exception is thrown, there are - no changes to any JSON value. - - @liveexample{The following code shows the constructor with several - compatible types.,basic_json__CompatibleType} - - @since version 2.1.0 - */ - template < typename CompatibleType, - typename U = detail::uncvref_t, - detail::enable_if_t < - !detail::is_basic_json::value && detail::is_compatible_type::value, int > = 0 > - basic_json(CompatibleType && val) noexcept(noexcept( - JSONSerializer::to_json(std::declval(), - std::forward(val)))) - { - JSONSerializer::to_json(*this, std::forward(val)); - set_parents(); - assert_invariant(); - } - - /*! - @brief create a JSON value from an existing one - - This is a constructor for existing @ref basic_json types. - It does not hijack copy/move constructors, since the parameter has different - template arguments than the current ones. 
- - The constructor tries to convert the internal @ref m_value of the parameter. - - @tparam BasicJsonType a type such that: - - @a BasicJsonType is a @ref basic_json type. - - @a BasicJsonType has different template arguments than @ref basic_json_t. - - @param[in] val the @ref basic_json value to be converted. - - @complexity Usually linear in the size of the passed @a val, also - depending on the implementation of the called `to_json()` - method. - - @exceptionsafety Depends on the called constructor. For types directly - supported by the library (i.e., all types for which no `to_json()` function - was provided), strong guarantee holds: if an exception is thrown, there are - no changes to any JSON value. - - @since version 3.2.0 - */ - template < typename BasicJsonType, - detail::enable_if_t < - detail::is_basic_json::value&& !std::is_same::value, int > = 0 > - basic_json(const BasicJsonType& val) - { - using other_boolean_t = typename BasicJsonType::boolean_t; - using other_number_float_t = typename BasicJsonType::number_float_t; - using other_number_integer_t = typename BasicJsonType::number_integer_t; - using other_number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using other_string_t = typename BasicJsonType::string_t; - using other_object_t = typename BasicJsonType::object_t; - using other_array_t = typename BasicJsonType::array_t; - using other_binary_t = typename BasicJsonType::binary_t; - - switch (val.type()) - { - case value_t::boolean: - JSONSerializer::to_json(*this, val.template get()); - break; - case value_t::number_float: - JSONSerializer::to_json(*this, val.template get()); - break; - case value_t::number_integer: - JSONSerializer::to_json(*this, val.template get()); - break; - case value_t::number_unsigned: - JSONSerializer::to_json(*this, val.template get()); - break; - case value_t::string: - JSONSerializer::to_json(*this, val.template get_ref()); - break; - case value_t::object: - JSONSerializer::to_json(*this, val.template 
get_ref()); - break; - case value_t::array: - JSONSerializer::to_json(*this, val.template get_ref()); - break; - case value_t::binary: - JSONSerializer::to_json(*this, val.template get_ref()); - break; - case value_t::null: - *this = nullptr; - break; - case value_t::discarded: - m_type = value_t::discarded; - break; - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - set_parents(); - assert_invariant(); - } - - /*! - @brief create a container (array or object) from an initializer list - - Creates a JSON value of type array or object from the passed initializer - list @a init. In case @a type_deduction is `true` (default), the type of - the JSON value to be created is deducted from the initializer list @a init - according to the following rules: - - 1. If the list is empty, an empty JSON object value `{}` is created. - 2. If the list consists of pairs whose first element is a string, a JSON - object value is created where the first elements of the pairs are - treated as keys and the second elements are as values. - 3. In all other cases, an array is created. - - The rules aim to create the best fit between a C++ initializer list and - JSON values. The rationale is as follows: - - 1. The empty initializer list is written as `{}` which is exactly an empty - JSON object. - 2. C++ has no way of describing mapped types other than to list a list of - pairs. As JSON requires that keys must be of type string, rule 2 is the - weakest constraint one can pose on initializer lists to interpret them - as an object. - 3. In all other cases, the initializer list could not be interpreted as - JSON object type, so interpreting it as JSON array type is safe. 
- - With the rules described above, the following JSON values cannot be - expressed by an initializer list: - - - the empty array (`[]`): use @ref array(initializer_list_t) - with an empty initializer list in this case - - arrays whose elements satisfy rule 2: use @ref - array(initializer_list_t) with the same initializer list - in this case - - @note When used without parentheses around an empty initializer list, @ref - basic_json() is called instead of this function, yielding the JSON null - value. - - @param[in] init initializer list with JSON values - - @param[in] type_deduction internal parameter; when set to `true`, the type - of the JSON value is deducted from the initializer list @a init; when set - to `false`, the type provided via @a manual_type is forced. This mode is - used by the functions @ref array(initializer_list_t) and - @ref object(initializer_list_t). - - @param[in] manual_type internal parameter; when @a type_deduction is set - to `false`, the created JSON value will use the provided type (only @ref - value_t::array and @ref value_t::object are valid); when @a type_deduction - is set to `true`, this parameter has no effect - - @throw type_error.301 if @a type_deduction is `false`, @a manual_type is - `value_t::object`, but @a init contains an element which is not a pair - whose first element is a string. In this case, the constructor could not - create an object. If @a type_deduction would have be `true`, an array - would have been created. See @ref object(initializer_list_t) - for an example. - - @complexity Linear in the size of the initializer list @a init. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. 
- - @liveexample{The example below shows how JSON values are created from - initializer lists.,basic_json__list_init_t} - - @sa @ref array(initializer_list_t) -- create a JSON array - value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object - value from an initializer list - - @since version 1.0.0 - */ - basic_json(initializer_list_t init, - bool type_deduction = true, - value_t manual_type = value_t::array) - { - // check if each element is an array with two elements whose first - // element is a string - bool is_an_object = std::all_of(init.begin(), init.end(), - [](const detail::json_ref& element_ref) - { - return element_ref->is_array() && element_ref->size() == 2 && (*element_ref)[0].is_string(); - }); - - // adjust type if type deduction is not wanted - if (!type_deduction) - { - // if array is wanted, do not create an object though possible - if (manual_type == value_t::array) - { - is_an_object = false; - } - - // if object is wanted but impossible, throw an exception - if (JSON_HEDLEY_UNLIKELY(manual_type == value_t::object && !is_an_object)) - { - JSON_THROW(type_error::create(301, "cannot create object from initializer list", basic_json())); - } - } - - if (is_an_object) - { - // the initializer list is a list of pairs -> create object - m_type = value_t::object; - m_value = value_t::object; - - for (auto& element_ref : init) - { - auto element = element_ref.moved_or_copied(); - m_value.object->emplace( - std::move(*((*element.m_value.array)[0].m_value.string)), - std::move((*element.m_value.array)[1])); - } - } - else - { - // the initializer list describes an array -> create array - m_type = value_t::array; - m_value.array = create(init.begin(), init.end()); - } - - set_parents(); - assert_invariant(); - } - - /*! - @brief explicitly create a binary array (without subtype) - - Creates a JSON binary array value from a given binary container. 
Binary - values are part of various binary formats, such as CBOR, MessagePack, and - BSON. This constructor is used to create a value for serialization to those - formats. - - @note Note, this function exists because of the difficulty in correctly - specifying the correct template overload in the standard value ctor, as both - JSON arrays and JSON binary arrays are backed with some form of a - `std::vector`. Because JSON binary arrays are a non-standard extension it - was decided that it would be best to prevent automatic initialization of a - binary array type, for backwards compatibility and so it does not happen on - accident. - - @param[in] init container containing bytes to use as binary type - - @return JSON binary array value - - @complexity Linear in the size of @a init. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @since version 3.8.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json binary(const typename binary_t::container_type& init) - { - auto res = basic_json(); - res.m_type = value_t::binary; - res.m_value = init; - return res; - } - - /*! - @brief explicitly create a binary array (with subtype) - - Creates a JSON binary array value from a given binary container. Binary - values are part of various binary formats, such as CBOR, MessagePack, and - BSON. This constructor is used to create a value for serialization to those - formats. - - @note Note, this function exists because of the difficulty in correctly - specifying the correct template overload in the standard value ctor, as both - JSON arrays and JSON binary arrays are backed with some form of a - `std::vector`. Because JSON binary arrays are a non-standard extension it - was decided that it would be best to prevent automatic initialization of a - binary array type, for backwards compatibility and so it does not happen on - accident. 
- - @param[in] init container containing bytes to use as binary type - @param[in] subtype subtype to use in MessagePack and BSON - - @return JSON binary array value - - @complexity Linear in the size of @a init. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @since version 3.8.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json binary(const typename binary_t::container_type& init, std::uint8_t subtype) - { - auto res = basic_json(); - res.m_type = value_t::binary; - res.m_value = binary_t(init, subtype); - return res; - } - - /// @copydoc binary(const typename binary_t::container_type&) - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json binary(typename binary_t::container_type&& init) - { - auto res = basic_json(); - res.m_type = value_t::binary; - res.m_value = std::move(init); - return res; - } - - /// @copydoc binary(const typename binary_t::container_type&, std::uint8_t) - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json binary(typename binary_t::container_type&& init, std::uint8_t subtype) - { - auto res = basic_json(); - res.m_type = value_t::binary; - res.m_value = binary_t(std::move(init), subtype); - return res; - } - - /*! - @brief explicitly create an array from an initializer list - - Creates a JSON array value from a given initializer list. That is, given a - list of values `a, b, c`, creates the JSON value `[a, b, c]`. If the - initializer list is empty, the empty array `[]` is created. - - @note This function is only needed to express two edge cases that cannot - be realized with the initializer list constructor (@ref - basic_json(initializer_list_t, bool, value_t)). These cases - are: - 1. creating an array whose elements are all pairs whose first element is a - string -- in this case, the initializer list constructor would create an - object, taking the first elements as keys - 2. 
creating an empty array -- passing the empty initializer list to the - initializer list constructor yields an empty object - - @param[in] init initializer list with JSON values to create an array from - (optional) - - @return JSON array value - - @complexity Linear in the size of @a init. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @liveexample{The following code shows an example for the `array` - function.,array} - - @sa @ref basic_json(initializer_list_t, bool, value_t) -- - create a JSON value from an initializer list - @sa @ref object(initializer_list_t) -- create a JSON object - value from an initializer list - - @since version 1.0.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json array(initializer_list_t init = {}) - { - return basic_json(init, false, value_t::array); - } - - /*! - @brief explicitly create an object from an initializer list - - Creates a JSON object value from a given initializer list. The initializer - lists elements must be pairs, and their first elements must be strings. If - the initializer list is empty, the empty object `{}` is created. - - @note This function is only added for symmetry reasons. In contrast to the - related function @ref array(initializer_list_t), there are - no cases which can only be expressed by this function. That is, any - initializer list @a init can also be passed to the initializer list - constructor @ref basic_json(initializer_list_t, bool, value_t). - - @param[in] init initializer list to create an object from (optional) - - @return JSON object value - - @throw type_error.301 if @a init is not a list of pairs whose first - elements are strings. In this case, no object can be created. When such a - value is passed to @ref basic_json(initializer_list_t, bool, value_t), - an array would have been created from the passed initializer list @a init. - See example below. - - @complexity Linear in the size of @a init. 
- - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @liveexample{The following code shows an example for the `object` - function.,object} - - @sa @ref basic_json(initializer_list_t, bool, value_t) -- - create a JSON value from an initializer list - @sa @ref array(initializer_list_t) -- create a JSON array - value from an initializer list - - @since version 1.0.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json object(initializer_list_t init = {}) - { - return basic_json(init, false, value_t::object); - } - - /*! - @brief construct an array with count copies of given value - - Constructs a JSON array value by creating @a cnt copies of a passed value. - In case @a cnt is `0`, an empty array is created. - - @param[in] cnt the number of JSON copies of @a val to create - @param[in] val the JSON value to copy - - @post `std::distance(begin(),end()) == cnt` holds. - - @complexity Linear in @a cnt. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @liveexample{The following code shows examples for the @ref - basic_json(size_type\, const basic_json&) - constructor.,basic_json__size_type_basic_json} - - @since version 1.0.0 - */ - basic_json(size_type cnt, const basic_json& val) - : m_type(value_t::array) - { - m_value.array = create(cnt, val); - set_parents(); - assert_invariant(); - } - - /*! - @brief construct a JSON container given an iterator range - - Constructs the JSON value with the contents of the range `[first, last)`. - The semantics depends on the different types a JSON value can have: - - In case of a null type, invalid_iterator.206 is thrown. - - In case of other primitive types (number, boolean, or string), @a first - must be `begin()` and @a last must be `end()`. In this case, the value is - copied. Otherwise, invalid_iterator.204 is thrown. 
- - In case of structured types (array, object), the constructor behaves as - similar versions for `std::vector` or `std::map`; that is, a JSON array - or object is constructed from the values in the range. - - @tparam InputIT an input iterator type (@ref iterator or @ref - const_iterator) - - @param[in] first begin of the range to copy from (included) - @param[in] last end of the range to copy from (excluded) - - @pre Iterators @a first and @a last must be initialized. **This - precondition is enforced with an assertion (see warning).** If - assertions are switched off, a violation of this precondition yields - undefined behavior. - - @pre Range `[first, last)` is valid. Usually, this precondition cannot be - checked efficiently. Only certain edge cases are detected; see the - description of the exceptions below. A violation of this precondition - yields undefined behavior. - - @warning A precondition is enforced with a runtime assertion that will - result in calling `std::abort` if this precondition is not met. - Assertions can be disabled by defining `NDEBUG` at compile time. - See https://en.cppreference.com/w/cpp/error/assert for more - information. - - @throw invalid_iterator.201 if iterators @a first and @a last are not - compatible (i.e., do not belong to the same JSON value). In this case, - the range `[first, last)` is undefined. - @throw invalid_iterator.204 if iterators @a first and @a last belong to a - primitive type (number, boolean, or string), but @a first does not point - to the first element any more. In this case, the range `[first, last)` is - undefined. See example code below. - @throw invalid_iterator.206 if iterators @a first and @a last belong to a - null value. In this case, the range `[first, last)` is undefined. - - @complexity Linear in distance between @a first and @a last. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. 
- - @liveexample{The example below shows several ways to create JSON values by - specifying a subrange with iterators.,basic_json__InputIt_InputIt} - - @since version 1.0.0 - */ - template < class InputIT, typename std::enable_if < - std::is_same::value || - std::is_same::value, int >::type = 0 > - basic_json(InputIT first, InputIT last) - { - JSON_ASSERT(first.m_object != nullptr); - JSON_ASSERT(last.m_object != nullptr); - - // make sure iterator fits the current value - if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) - { - JSON_THROW(invalid_iterator::create(201, "iterators are not compatible", basic_json())); - } - - // copy type from first iterator - m_type = first.m_object->m_type; - - // check if iterator range is complete for primitive values - switch (m_type) - { - case value_t::boolean: - case value_t::number_float: - case value_t::number_integer: - case value_t::number_unsigned: - case value_t::string: - { - if (JSON_HEDLEY_UNLIKELY(!first.m_it.primitive_iterator.is_begin() - || !last.m_it.primitive_iterator.is_end())) - { - JSON_THROW(invalid_iterator::create(204, "iterators out of range", *first.m_object)); - } - break; - } - - default: - break; - } - - switch (m_type) - { - case value_t::number_integer: - { - m_value.number_integer = first.m_object->m_value.number_integer; - break; - } - - case value_t::number_unsigned: - { - m_value.number_unsigned = first.m_object->m_value.number_unsigned; - break; - } - - case value_t::number_float: - { - m_value.number_float = first.m_object->m_value.number_float; - break; - } - - case value_t::boolean: - { - m_value.boolean = first.m_object->m_value.boolean; - break; - } - - case value_t::string: - { - m_value = *first.m_object->m_value.string; - break; - } - - case value_t::object: - { - m_value.object = create(first.m_it.object_iterator, - last.m_it.object_iterator); - break; - } - - case value_t::array: - { - m_value.array = create(first.m_it.array_iterator, - last.m_it.array_iterator); - break; - } 
- - case value_t::binary: - { - m_value = *first.m_object->m_value.binary; - break; - } - - default: - JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " + std::string(first.m_object->type_name()), *first.m_object)); - } - - set_parents(); - assert_invariant(); - } - - - /////////////////////////////////////// - // other constructors and destructor // - /////////////////////////////////////// - - template, - std::is_same>::value, int> = 0 > - basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {} - - /*! - @brief copy constructor - - Creates a copy of a given JSON value. - - @param[in] other the JSON value to copy - - @post `*this == other` - - @complexity Linear in the size of @a other. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes to any JSON value. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is linear. - - As postcondition, it holds: `other == basic_json(other)`. 
- - @liveexample{The following code shows an example for the copy - constructor.,basic_json__basic_json} - - @since version 1.0.0 - */ - basic_json(const basic_json& other) - : m_type(other.m_type) - { - // check of passed value is valid - other.assert_invariant(); - - switch (m_type) - { - case value_t::object: - { - m_value = *other.m_value.object; - break; - } - - case value_t::array: - { - m_value = *other.m_value.array; - break; - } - - case value_t::string: - { - m_value = *other.m_value.string; - break; - } - - case value_t::boolean: - { - m_value = other.m_value.boolean; - break; - } - - case value_t::number_integer: - { - m_value = other.m_value.number_integer; - break; - } - - case value_t::number_unsigned: - { - m_value = other.m_value.number_unsigned; - break; - } - - case value_t::number_float: - { - m_value = other.m_value.number_float; - break; - } - - case value_t::binary: - { - m_value = *other.m_value.binary; - break; - } - - default: - break; - } - - set_parents(); - assert_invariant(); - } - - /*! - @brief move constructor - - Move constructor. Constructs a JSON value with the contents of the given - value @a other using move semantics. It "steals" the resources from @a - other and leaves it as JSON null value. - - @param[in,out] other value to move to this object - - @post `*this` has the same value as @a other before the call. - @post @a other is a JSON null value. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this constructor never throws - exceptions. - - @requirement This function helps `basic_json` satisfying the - [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible) - requirements. 
- - @liveexample{The code below shows the move constructor explicitly called - via std::move.,basic_json__moveconstructor} - - @since version 1.0.0 - */ - basic_json(basic_json&& other) noexcept - : m_type(std::move(other.m_type)), - m_value(std::move(other.m_value)) - { - // check that passed value is valid - other.assert_invariant(false); - - // invalidate payload - other.m_type = value_t::null; - other.m_value = {}; - - set_parents(); - assert_invariant(); - } - - /*! - @brief copy assignment - - Copy assignment operator. Copies a JSON value via the "copy and swap" - strategy: It is expressed in terms of the copy constructor, destructor, - and the `swap()` member function. - - @param[in] other value to copy from - - @complexity Linear. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is linear. - - @liveexample{The code below shows and example for the copy assignment. It - creates a copy of value `a` which is then swapped with `b`. Finally\, the - copy of `a` (which is the null value after the swap) is - destroyed.,basic_json__copyassignment} - - @since version 1.0.0 - */ - basic_json& operator=(basic_json other) noexcept ( - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value&& - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value - ) - { - // check that passed value is valid - other.assert_invariant(); - - using std::swap; - swap(m_type, other.m_type); - swap(m_value, other.m_value); - - set_parents(); - assert_invariant(); - return *this; - } - - /*! - @brief destructor - - Destroys the JSON value and frees all allocated memory. - - @complexity Linear. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is linear. 
- - All stored elements are destroyed and all memory is freed. - - @since version 1.0.0 - */ - ~basic_json() noexcept - { - assert_invariant(false); - m_value.destroy(m_type); - } - - /// @} - - public: - /////////////////////// - // object inspection // - /////////////////////// - - /// @name object inspection - /// Functions to inspect the type of a JSON value. - /// @{ - - /*! - @brief serialization - - Serialization function for JSON values. The function tries to mimic - Python's `json.dumps()` function, and currently supports its @a indent - and @a ensure_ascii parameters. - - @param[in] indent If indent is nonnegative, then array elements and object - members will be pretty-printed with that indent level. An indent level of - `0` will only insert newlines. `-1` (the default) selects the most compact - representation. - @param[in] indent_char The character to use for indentation if @a indent is - greater than `0`. The default is ` ` (space). - @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters - in the output are escaped with `\uXXXX` sequences, and the result consists - of ASCII characters only. - @param[in] error_handler how to react on decoding errors; there are three - possible values: `strict` (throws and exception in case a decoding error - occurs; default), `replace` (replace invalid UTF-8 sequences with U+FFFD), - and `ignore` (ignore invalid UTF-8 sequences during serialization; all - bytes are copied to the output unchanged). - - @return string containing the serialization of the JSON value - - @throw type_error.316 if a string stored inside the JSON value is not - UTF-8 encoded and @a error_handler is set to strict - - @note Binary values are serialized as object containing two keys: - - "bytes": an array of bytes as integers - - "subtype": the subtype as integer or "null" if the binary has no subtype - - @complexity Linear. 
- - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @liveexample{The following example shows the effect of different @a indent\, - @a indent_char\, and @a ensure_ascii parameters to the result of the - serialization.,dump} - - @see https://docs.python.org/2/library/json.html#json.dump - - @since version 1.0.0; indentation character @a indent_char, option - @a ensure_ascii and exceptions added in version 3.0.0; error - handlers added in version 3.4.0; serialization of binary values added - in version 3.8.0. - */ - string_t dump(const int indent = -1, - const char indent_char = ' ', - const bool ensure_ascii = false, - const error_handler_t error_handler = error_handler_t::strict) const - { - string_t result; - serializer s(detail::output_adapter(result), indent_char, error_handler); - - if (indent >= 0) - { - s.dump(*this, true, ensure_ascii, static_cast(indent)); - } - else - { - s.dump(*this, false, ensure_ascii, 0); - } - - return result; - } - - /*! - @brief return the type of the JSON value (explicit) - - Return the type of the JSON value as a value from the @ref value_t - enumeration. - - @return the type of the JSON value - Value type | return value - ------------------------- | ------------------------- - null | value_t::null - boolean | value_t::boolean - string | value_t::string - number (integer) | value_t::number_integer - number (unsigned integer) | value_t::number_unsigned - number (floating-point) | value_t::number_float - object | value_t::object - array | value_t::array - binary | value_t::binary - discarded | value_t::discarded - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. 
- - @liveexample{The following code exemplifies `type()` for all JSON - types.,type} - - @sa @ref operator value_t() -- return the type of the JSON value (implicit) - @sa @ref type_name() -- return the type as string - - @since version 1.0.0 - */ - constexpr value_t type() const noexcept - { - return m_type; - } - - /*! - @brief return whether type is primitive - - This function returns true if and only if the JSON type is primitive - (string, number, boolean, or null). - - @return `true` if type is primitive (string, number, boolean, or null), - `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_primitive()` for all JSON - types.,is_primitive} - - @sa @ref is_structured() -- returns whether JSON value is structured - @sa @ref is_null() -- returns whether JSON value is `null` - @sa @ref is_string() -- returns whether JSON value is a string - @sa @ref is_boolean() -- returns whether JSON value is a boolean - @sa @ref is_number() -- returns whether JSON value is a number - @sa @ref is_binary() -- returns whether JSON value is a binary array - - @since version 1.0.0 - */ - constexpr bool is_primitive() const noexcept - { - return is_null() || is_string() || is_boolean() || is_number() || is_binary(); - } - - /*! - @brief return whether type is structured - - This function returns true if and only if the JSON type is structured - (array or object). - - @return `true` if type is structured (array or object), `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. 
- - @liveexample{The following code exemplifies `is_structured()` for all JSON - types.,is_structured} - - @sa @ref is_primitive() -- returns whether value is primitive - @sa @ref is_array() -- returns whether value is an array - @sa @ref is_object() -- returns whether value is an object - - @since version 1.0.0 - */ - constexpr bool is_structured() const noexcept - { - return is_array() || is_object(); - } - - /*! - @brief return whether value is null - - This function returns true if and only if the JSON value is null. - - @return `true` if type is null, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_null()` for all JSON - types.,is_null} - - @since version 1.0.0 - */ - constexpr bool is_null() const noexcept - { - return m_type == value_t::null; - } - - /*! - @brief return whether value is a boolean - - This function returns true if and only if the JSON value is a boolean. - - @return `true` if type is boolean, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_boolean()` for all JSON - types.,is_boolean} - - @since version 1.0.0 - */ - constexpr bool is_boolean() const noexcept - { - return m_type == value_t::boolean; - } - - /*! - @brief return whether value is a number - - This function returns true if and only if the JSON value is a number. This - includes both integer (signed and unsigned) and floating-point values. - - @return `true` if type is number (regardless whether integer, unsigned - integer or floating-type), `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. 
- - @liveexample{The following code exemplifies `is_number()` for all JSON - types.,is_number} - - @sa @ref is_number_integer() -- check if value is an integer or unsigned - integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer - number - @sa @ref is_number_float() -- check if value is a floating-point number - - @since version 1.0.0 - */ - constexpr bool is_number() const noexcept - { - return is_number_integer() || is_number_float(); - } - - /*! - @brief return whether value is an integer number - - This function returns true if and only if the JSON value is a signed or - unsigned integer number. This excludes floating-point values. - - @return `true` if type is an integer or unsigned integer number, `false` - otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_number_integer()` for all - JSON types.,is_number_integer} - - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer - number - @sa @ref is_number_float() -- check if value is a floating-point number - - @since version 1.0.0 - */ - constexpr bool is_number_integer() const noexcept - { - return m_type == value_t::number_integer || m_type == value_t::number_unsigned; - } - - /*! - @brief return whether value is an unsigned integer number - - This function returns true if and only if the JSON value is an unsigned - integer number. This excludes floating-point and signed integer values. - - @return `true` if type is an unsigned integer number, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. 
- - @liveexample{The following code exemplifies `is_number_unsigned()` for all - JSON types.,is_number_unsigned} - - @sa @ref is_number() -- check if value is a number - @sa @ref is_number_integer() -- check if value is an integer or unsigned - integer number - @sa @ref is_number_float() -- check if value is a floating-point number - - @since version 2.0.0 - */ - constexpr bool is_number_unsigned() const noexcept - { - return m_type == value_t::number_unsigned; - } - - /*! - @brief return whether value is a floating-point number - - This function returns true if and only if the JSON value is a - floating-point number. This excludes signed and unsigned integer values. - - @return `true` if type is a floating-point number, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_number_float()` for all - JSON types.,is_number_float} - - @sa @ref is_number() -- check if value is number - @sa @ref is_number_integer() -- check if value is an integer number - @sa @ref is_number_unsigned() -- check if value is an unsigned integer - number - - @since version 1.0.0 - */ - constexpr bool is_number_float() const noexcept - { - return m_type == value_t::number_float; - } - - /*! - @brief return whether value is an object - - This function returns true if and only if the JSON value is an object. - - @return `true` if type is object, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_object()` for all JSON - types.,is_object} - - @since version 1.0.0 - */ - constexpr bool is_object() const noexcept - { - return m_type == value_t::object; - } - - /*! - @brief return whether value is an array - - This function returns true if and only if the JSON value is an array. - - @return `true` if type is array, `false` otherwise. 
- - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_array()` for all JSON - types.,is_array} - - @since version 1.0.0 - */ - constexpr bool is_array() const noexcept - { - return m_type == value_t::array; - } - - /*! - @brief return whether value is a string - - This function returns true if and only if the JSON value is a string. - - @return `true` if type is string, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_string()` for all JSON - types.,is_string} - - @since version 1.0.0 - */ - constexpr bool is_string() const noexcept - { - return m_type == value_t::string; - } - - /*! - @brief return whether value is a binary array - - This function returns true if and only if the JSON value is a binary array. - - @return `true` if type is binary array, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies `is_binary()` for all JSON - types.,is_binary} - - @since version 3.8.0 - */ - constexpr bool is_binary() const noexcept - { - return m_type == value_t::binary; - } - - /*! - @brief return whether value is discarded - - This function returns true if and only if the JSON value was discarded - during parsing with a callback function (see @ref parser_callback_t). - - @note This function will always be `false` for JSON values after parsing. - That is, discarded values can only occur during parsing, but will be - removed when inside a structured value or replaced by null in other cases. - - @return `true` if type is discarded, `false` otherwise. - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. 
- - @liveexample{The following code exemplifies `is_discarded()` for all JSON - types.,is_discarded} - - @since version 1.0.0 - */ - constexpr bool is_discarded() const noexcept - { - return m_type == value_t::discarded; - } - - /*! - @brief return the type of the JSON value (implicit) - - Implicitly return the type of the JSON value as a value from the @ref - value_t enumeration. - - @return the type of the JSON value - - @complexity Constant. - - @exceptionsafety No-throw guarantee: this member function never throws - exceptions. - - @liveexample{The following code exemplifies the @ref value_t operator for - all JSON types.,operator__value_t} - - @sa @ref type() -- return the type of the JSON value (explicit) - @sa @ref type_name() -- return the type as string - - @since version 1.0.0 - */ - constexpr operator value_t() const noexcept - { - return m_type; - } - - /// @} - - private: - ////////////////// - // value access // - ////////////////// - - /// get a boolean (explicit) - boolean_t get_impl(boolean_t* /*unused*/) const - { - if (JSON_HEDLEY_LIKELY(is_boolean())) - { - return m_value.boolean; - } - - JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(type_name()), *this)); - } - - /// get a pointer to the value (object) - object_t* get_impl_ptr(object_t* /*unused*/) noexcept - { - return is_object() ? m_value.object : nullptr; - } - - /// get a pointer to the value (object) - constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const noexcept - { - return is_object() ? m_value.object : nullptr; - } - - /// get a pointer to the value (array) - array_t* get_impl_ptr(array_t* /*unused*/) noexcept - { - return is_array() ? m_value.array : nullptr; - } - - /// get a pointer to the value (array) - constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const noexcept - { - return is_array() ? 
m_value.array : nullptr; - } - - /// get a pointer to the value (string) - string_t* get_impl_ptr(string_t* /*unused*/) noexcept - { - return is_string() ? m_value.string : nullptr; - } - - /// get a pointer to the value (string) - constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const noexcept - { - return is_string() ? m_value.string : nullptr; - } - - /// get a pointer to the value (boolean) - boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept - { - return is_boolean() ? &m_value.boolean : nullptr; - } - - /// get a pointer to the value (boolean) - constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) const noexcept - { - return is_boolean() ? &m_value.boolean : nullptr; - } - - /// get a pointer to the value (integer number) - number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept - { - return is_number_integer() ? &m_value.number_integer : nullptr; - } - - /// get a pointer to the value (integer number) - constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept - { - return is_number_integer() ? &m_value.number_integer : nullptr; - } - - /// get a pointer to the value (unsigned number) - number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept - { - return is_number_unsigned() ? &m_value.number_unsigned : nullptr; - } - - /// get a pointer to the value (unsigned number) - constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t* /*unused*/) const noexcept - { - return is_number_unsigned() ? &m_value.number_unsigned : nullptr; - } - - /// get a pointer to the value (floating-point number) - number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept - { - return is_number_float() ? &m_value.number_float : nullptr; - } - - /// get a pointer to the value (floating-point number) - constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unused*/) const noexcept - { - return is_number_float() ? 
&m_value.number_float : nullptr; - } - - /// get a pointer to the value (binary) - binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept - { - return is_binary() ? m_value.binary : nullptr; - } - - /// get a pointer to the value (binary) - constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const noexcept - { - return is_binary() ? m_value.binary : nullptr; - } - - /*! - @brief helper function to implement get_ref() - - This function helps to implement get_ref() without code duplication for - const and non-const overloads - - @tparam ThisType will be deduced as `basic_json` or `const basic_json` - - @throw type_error.303 if ReferenceType does not match underlying value - type of the current JSON - */ - template - static ReferenceType get_ref_impl(ThisType& obj) - { - // delegate the call to get_ptr<>() - auto ptr = obj.template get_ptr::type>(); - - if (JSON_HEDLEY_LIKELY(ptr != nullptr)) - { - return *ptr; - } - - JSON_THROW(type_error::create(303, "incompatible ReferenceType for get_ref, actual type is " + std::string(obj.type_name()), obj)); - } - - public: - /// @name value access - /// Direct access to the stored value of a JSON value. - /// @{ - - /*! - @brief get special-case overload - - This overloads avoids a lot of template boilerplate, it can be seen as the - identity method - - @tparam BasicJsonType == @ref basic_json - - @return a copy of *this - - @complexity Constant. - - @since version 2.1.0 - */ - template::type, basic_json_t>::value, - int> = 0> - basic_json get() const - { - return *this; - } - - /*! - @brief get special-case overload - - This overloads converts the current @ref basic_json in a different - @ref basic_json type - - @tparam BasicJsonType == @ref basic_json - - @return a copy of *this, converted into @tparam BasicJsonType - - @complexity Depending on the implementation of the called `from_json()` - method. 
- - @since version 3.2.0 - */ - template < typename BasicJsonType, detail::enable_if_t < - !std::is_same::value&& - detail::is_basic_json::value, int > = 0 > - BasicJsonType get() const - { - return *this; - } - - /*! - @brief get a value (explicit) - - Explicit type conversion between the JSON value and a compatible value - which is [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible) - and [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). - The value is converted by calling the @ref json_serializer - `from_json()` method. - - The function is equivalent to executing - @code {.cpp} - ValueType ret; - JSONSerializer::from_json(*this, ret); - return ret; - @endcode - - This overloads is chosen if: - - @a ValueType is not @ref basic_json, - - @ref json_serializer has a `from_json()` method of the form - `void from_json(const basic_json&, ValueType&)`, and - - @ref json_serializer does not have a `from_json()` method of - the form `ValueType from_json(const basic_json&)` - - @tparam ValueTypeCV the provided value type - @tparam ValueType the returned value type - - @return copy of the JSON value, converted to @a ValueType - - @throw what @ref json_serializer `from_json()` method throws - - @liveexample{The example below shows several conversions from JSON values - to other types. 
There a few things to note: (1) Floating-point numbers can - be converted to integers\, (2) A JSON array can be converted to a standard - `std::vector`\, (3) A JSON object can be converted to C++ - associative containers such as `std::unordered_map`.,get__ValueType_const} - - @since version 2.1.0 - */ - template < typename ValueTypeCV, typename ValueType = detail::uncvref_t, - detail::enable_if_t < - !detail::is_basic_json::value && - detail::has_from_json::value && - !detail::has_non_default_from_json::value, - int > = 0 > - ValueType get() const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), std::declval()))) - { - // we cannot static_assert on ValueTypeCV being non-const, because - // there is support for get(), which is why we - // still need the uncvref - static_assert(!std::is_reference::value, - "get() cannot be used with reference types, you might want to use get_ref()"); - static_assert(std::is_default_constructible::value, - "types must be DefaultConstructible when used with get()"); - - ValueType ret; - JSONSerializer::from_json(*this, ret); - return ret; - } - - /*! - @brief get a value (explicit); special case - - Explicit type conversion between the JSON value and a compatible value - which is **not** [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible) - and **not** [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible). - The value is converted by calling the @ref json_serializer - `from_json()` method. - - The function is equivalent to executing - @code {.cpp} - return JSONSerializer::from_json(*this); - @endcode - - This overloads is chosen if: - - @a ValueType is not @ref basic_json and - - @ref json_serializer has a `from_json()` method of the form - `ValueType from_json(const basic_json&)` - - @note If @ref json_serializer has both overloads of - `from_json()`, this one is chosen. 
- - @tparam ValueTypeCV the provided value type - @tparam ValueType the returned value type - - @return copy of the JSON value, converted to @a ValueType - - @throw what @ref json_serializer `from_json()` method throws - - @since version 2.1.0 - */ - template < typename ValueTypeCV, typename ValueType = detail::uncvref_t, - detail::enable_if_t < !std::is_same::value && - detail::has_non_default_from_json::value, - int > = 0 > - ValueType get() const noexcept(noexcept( - JSONSerializer::from_json(std::declval()))) - { - static_assert(!std::is_reference::value, - "get() cannot be used with reference types, you might want to use get_ref()"); - return JSONSerializer::from_json(*this); - } - - /*! - @brief get a value (explicit) - - Explicit type conversion between the JSON value and a compatible value. - The value is filled into the input parameter by calling the @ref json_serializer - `from_json()` method. - - The function is equivalent to executing - @code {.cpp} - ValueType v; - JSONSerializer::from_json(*this, v); - @endcode - - This overloads is chosen if: - - @a ValueType is not @ref basic_json, - - @ref json_serializer has a `from_json()` method of the form - `void from_json(const basic_json&, ValueType&)`, and - - @tparam ValueType the input parameter type. - - @return the input parameter, allowing chaining calls. - - @throw what @ref json_serializer `from_json()` method throws - - @liveexample{The example below shows several conversions from JSON values - to other types. 
There a few things to note: (1) Floating-point numbers can - be converted to integers\, (2) A JSON array can be converted to a standard - `std::vector`\, (3) A JSON object can be converted to C++ - associative containers such as `std::unordered_map`.,get_to} - - @since version 3.3.0 - */ - template < typename ValueType, - detail::enable_if_t < - !detail::is_basic_json::value&& - detail::has_from_json::value, - int > = 0 > - ValueType & get_to(ValueType& v) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), v))) - { - JSONSerializer::from_json(*this, v); - return v; - } - - // specialization to allow to call get_to with a basic_json value - // see https://github.com/nlohmann/json/issues/2175 - template::value, - int> = 0> - ValueType & get_to(ValueType& v) const - { - v = *this; - return v; - } - - template < - typename T, std::size_t N, - typename Array = T (&)[N], - detail::enable_if_t < - detail::has_from_json::value, int > = 0 > - Array get_to(T (&v)[N]) const - noexcept(noexcept(JSONSerializer::from_json( - std::declval(), v))) - { - JSONSerializer::from_json(*this, v); - return v; - } - - - /*! - @brief get a pointer value (implicit) - - Implicit pointer access to the internally stored JSON value. No copies are - made. - - @warning Writing data to the pointee of the result yields an undefined - state. - - @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref - object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, - @ref number_unsigned_t, or @ref number_float_t. Enforced by a static - assertion. - - @return pointer to the internally stored JSON value if the requested - pointer type @a PointerType fits to the JSON value; `nullptr` otherwise - - @complexity Constant. - - @liveexample{The example below shows how pointers to internal values of a - JSON value can be requested. 
Note that no type conversions are made and a - `nullptr` is returned if the value and the requested pointer type does not - match.,get_ptr} - - @since version 1.0.0 - */ - template::value, int>::type = 0> - auto get_ptr() noexcept -> decltype(std::declval().get_impl_ptr(std::declval())) - { - // delegate the call to get_impl_ptr<>() - return get_impl_ptr(static_cast(nullptr)); - } - - /*! - @brief get a pointer value (implicit) - @copydoc get_ptr() - */ - template < typename PointerType, typename std::enable_if < - std::is_pointer::value&& - std::is_const::type>::value, int >::type = 0 > - constexpr auto get_ptr() const noexcept -> decltype(std::declval().get_impl_ptr(std::declval())) - { - // delegate the call to get_impl_ptr<>() const - return get_impl_ptr(static_cast(nullptr)); - } - - /*! - @brief get a pointer value (explicit) - - Explicit pointer access to the internally stored JSON value. No copies are - made. - - @warning The pointer becomes invalid if the underlying JSON object - changes. - - @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref - object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, - @ref number_unsigned_t, or @ref number_float_t. - - @return pointer to the internally stored JSON value if the requested - pointer type @a PointerType fits to the JSON value; `nullptr` otherwise - - @complexity Constant. - - @liveexample{The example below shows how pointers to internal values of a - JSON value can be requested. Note that no type conversions are made and a - `nullptr` is returned if the value and the requested pointer type does not - match.,get__PointerType} - - @sa @ref get_ptr() for explicit pointer-member access - - @since version 1.0.0 - */ - template::value, int>::type = 0> - auto get() noexcept -> decltype(std::declval().template get_ptr()) - { - // delegate the call to get_ptr - return get_ptr(); - } - - /*! 
- @brief get a pointer value (explicit) - @copydoc get() - */ - template::value, int>::type = 0> - constexpr auto get() const noexcept -> decltype(std::declval().template get_ptr()) - { - // delegate the call to get_ptr - return get_ptr(); - } - - /*! - @brief get a reference value (implicit) - - Implicit reference access to the internally stored JSON value. No copies - are made. - - @warning Writing data to the referee of the result yields an undefined - state. - - @tparam ReferenceType reference type; must be a reference to @ref array_t, - @ref object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, or - @ref number_float_t. Enforced by static assertion. - - @return reference to the internally stored JSON value if the requested - reference type @a ReferenceType fits to the JSON value; throws - type_error.303 otherwise - - @throw type_error.303 in case passed type @a ReferenceType is incompatible - with the stored JSON value; see example below - - @complexity Constant. - - @liveexample{The example shows several calls to `get_ref()`.,get_ref} - - @since version 1.1.0 - */ - template::value, int>::type = 0> - ReferenceType get_ref() - { - // delegate call to get_ref_impl - return get_ref_impl(*this); - } - - /*! - @brief get a reference value (implicit) - @copydoc get_ref() - */ - template < typename ReferenceType, typename std::enable_if < - std::is_reference::value&& - std::is_const::type>::value, int >::type = 0 > - ReferenceType get_ref() const - { - // delegate call to get_ref_impl - return get_ref_impl(*this); - } - - /*! - @brief get a value (implicit) - - Implicit type conversion between the JSON value and a compatible value. - The call is realized by calling @ref get() const. - - @tparam ValueType non-pointer type compatible to the JSON value, for - instance `int` for JSON integer numbers, `bool` for JSON booleans, or - `std::vector` types for JSON arrays. 
The character type of @ref string_t - as well as an initializer list of this type is excluded to avoid - ambiguities as these types implicitly convert to `std::string`. - - @return copy of the JSON value, converted to type @a ValueType - - @throw type_error.302 in case passed type @a ValueType is incompatible - to the JSON value type (e.g., the JSON value is of type boolean, but a - string is requested); see example below - - @complexity Linear in the size of the JSON value. - - @liveexample{The example below shows several conversions from JSON values - to other types. There a few things to note: (1) Floating-point numbers can - be converted to integers\, (2) A JSON array can be converted to a standard - `std::vector`\, (3) A JSON object can be converted to C++ - associative containers such as `std::unordered_map`.,operator__ValueType} - - @since version 1.0.0 - */ - template < typename ValueType, typename std::enable_if < - !std::is_pointer::value&& - !std::is_same>::value&& - !std::is_same::value&& - !detail::is_basic_json::value - && !std::is_same>::value -#if defined(JSON_HAS_CPP_17) && (defined(__GNUC__) || (defined(_MSC_VER) && _MSC_VER >= 1910 && _MSC_VER <= 1914)) - && !std::is_same::value -#endif - && detail::is_detected::value - , int >::type = 0 > - JSON_EXPLICIT operator ValueType() const - { - // delegate the call to get<>() const - return get(); - } - - /*! 
- @return reference to the binary value - - @throw type_error.302 if the value is not binary - - @sa @ref is_binary() to check if the value is binary - - @since version 3.8.0 - */ - binary_t& get_binary() - { - if (!is_binary()) - { - JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name()), *this)); - } - - return *get_ptr(); - } - - /// @copydoc get_binary() - const binary_t& get_binary() const - { - if (!is_binary()) - { - JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name()), *this)); - } - - return *get_ptr(); - } - - /// @} - - - //////////////////// - // element access // - //////////////////// - - /// @name element access - /// Access to the JSON value. - /// @{ - - /*! - @brief access specified array element with bounds checking - - Returns a reference to the element at specified location @a idx, with - bounds checking. - - @param[in] idx index of the element to access - - @return reference to the element at index @a idx - - @throw type_error.304 if the JSON value is not an array; in this case, - calling `at` with an index makes no sense. See example below. - @throw out_of_range.401 if the index @a idx is out of range of the array; - that is, `idx >= size()`. See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @since version 1.0.0 - - @liveexample{The example below shows how array elements can be read and - written using `at()`. 
It also demonstrates the different exceptions that - can be thrown.,at__size_type} - */ - reference at(size_type idx) - { - // at only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - JSON_TRY - { - return set_parent(m_value.array->at(idx)); - } - JSON_CATCH (std::out_of_range&) - { - // create better exception explanation - JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); - } - } - else - { - JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief access specified array element with bounds checking - - Returns a const reference to the element at specified location @a idx, - with bounds checking. - - @param[in] idx index of the element to access - - @return const reference to the element at index @a idx - - @throw type_error.304 if the JSON value is not an array; in this case, - calling `at` with an index makes no sense. See example below. - @throw out_of_range.401 if the index @a idx is out of range of the array; - that is, `idx >= size()`. See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @since version 1.0.0 - - @liveexample{The example below shows how array elements can be read using - `at()`. It also demonstrates the different exceptions that can be thrown., - at__size_type_const} - */ - const_reference at(size_type idx) const - { - // at only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - JSON_TRY - { - return m_value.array->at(idx); - } - JSON_CATCH (std::out_of_range&) - { - // create better exception explanation - JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); - } - } - else - { - JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); - } - } - - /*! 
- @brief access specified object element with bounds checking - - Returns a reference to the element at with specified key @a key, with - bounds checking. - - @param[in] key key of the element to access - - @return reference to the element at key @a key - - @throw type_error.304 if the JSON value is not an object; in this case, - calling `at` with a key makes no sense. See example below. - @throw out_of_range.403 if the key @a key is is not stored in the object; - that is, `find(key) == end()`. See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Logarithmic in the size of the container. - - @sa @ref operator[](const typename object_t::key_type&) for unchecked - access by reference - @sa @ref value() for access by value with a default value - - @since version 1.0.0 - - @liveexample{The example below shows how object elements can be read and - written using `at()`. It also demonstrates the different exceptions that - can be thrown.,at__object_t_key_type} - */ - reference at(const typename object_t::key_type& key) - { - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - JSON_TRY - { - return set_parent(m_value.object->at(key)); - } - JSON_CATCH (std::out_of_range&) - { - // create better exception explanation - JSON_THROW(out_of_range::create(403, "key '" + key + "' not found", *this)); - } - } - else - { - JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief access specified object element with bounds checking - - Returns a const reference to the element at with specified key @a key, - with bounds checking. - - @param[in] key key of the element to access - - @return const reference to the element at key @a key - - @throw type_error.304 if the JSON value is not an object; in this case, - calling `at` with a key makes no sense. See example below. 
- @throw out_of_range.403 if the key @a key is is not stored in the object; - that is, `find(key) == end()`. See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Logarithmic in the size of the container. - - @sa @ref operator[](const typename object_t::key_type&) for unchecked - access by reference - @sa @ref value() for access by value with a default value - - @since version 1.0.0 - - @liveexample{The example below shows how object elements can be read using - `at()`. It also demonstrates the different exceptions that can be thrown., - at__object_t_key_type_const} - */ - const_reference at(const typename object_t::key_type& key) const - { - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - JSON_TRY - { - return m_value.object->at(key); - } - JSON_CATCH (std::out_of_range&) - { - // create better exception explanation - JSON_THROW(out_of_range::create(403, "key '" + key + "' not found", *this)); - } - } - else - { - JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief access specified array element - - Returns a reference to the element at specified location @a idx. - - @note If @a idx is beyond the range of the array (i.e., `idx >= size()`), - then the array is silently filled up with `null` values to make `idx` a - valid reference to the last stored element. - - @param[in] idx index of the element to access - - @return reference to the element at index @a idx - - @throw type_error.305 if the JSON value is not an array or null; in that - cases, using the [] operator with an index makes no sense. - - @complexity Constant if @a idx is in the range of the array. Otherwise - linear in `idx - size()`. - - @liveexample{The example below shows how array elements can be read and - written using `[]` operator. 
Note the addition of `null` - values.,operatorarray__size_type} - - @since version 1.0.0 - */ - reference operator[](size_type idx) - { - // implicitly convert null value to an empty array - if (is_null()) - { - m_type = value_t::array; - m_value.array = create(); - assert_invariant(); - } - - // operator[] only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - // fill up array with null values if given idx is outside range - if (idx >= m_value.array->size()) - { -#if JSON_DIAGNOSTICS - // remember array size before resizing - const auto previous_size = m_value.array->size(); -#endif - m_value.array->resize(idx + 1); - -#if JSON_DIAGNOSTICS - // set parent for values added above - set_parents(begin() + static_cast(previous_size), static_cast(idx + 1 - previous_size)); -#endif - } - - return m_value.array->operator[](idx); - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name()), *this)); - } - - /*! - @brief access specified array element - - Returns a const reference to the element at specified location @a idx. - - @param[in] idx index of the element to access - - @return const reference to the element at index @a idx - - @throw type_error.305 if the JSON value is not an array; in that case, - using the [] operator with an index makes no sense. - - @complexity Constant. - - @liveexample{The example below shows how array elements can be read using - the `[]` operator.,operatorarray__size_type_const} - - @since version 1.0.0 - */ - const_reference operator[](size_type idx) const - { - // const operator[] only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - return m_value.array->operator[](idx); - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name()), *this)); - } - - /*! - @brief access specified object element - - Returns a reference to the element at with specified key @a key. 
- - @note If @a key is not found in the object, then it is silently added to - the object and filled with a `null` value to make `key` a valid reference. - In case the value was `null` before, it is converted to an object. - - @param[in] key key of the element to access - - @return reference to the element at key @a key - - @throw type_error.305 if the JSON value is not an object or null; in that - cases, using the [] operator with a key makes no sense. - - @complexity Logarithmic in the size of the container. - - @liveexample{The example below shows how object elements can be read and - written using the `[]` operator.,operatorarray__key_type} - - @sa @ref at(const typename object_t::key_type&) for access by reference - with range checking - @sa @ref value() for access by value with a default value - - @since version 1.0.0 - */ - reference operator[](const typename object_t::key_type& key) - { - // implicitly convert null value to an empty object - if (is_null()) - { - m_type = value_t::object; - m_value.object = create(); - assert_invariant(); - } - - // operator[] only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - return set_parent(m_value.object->operator[](key)); - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); - } - - /*! - @brief read-only access specified object element - - Returns a const reference to the element at with specified key @a key. No - bounds checking is performed. - - @warning If the element with key @a key does not exist, the behavior is - undefined. - - @param[in] key key of the element to access - - @return const reference to the element at key @a key - - @pre The element with key @a key must exist. **This precondition is - enforced with an assertion.** - - @throw type_error.305 if the JSON value is not an object; in that case, - using the [] operator with a key makes no sense. - - @complexity Logarithmic in the size of the container. 
- - @liveexample{The example below shows how object elements can be read using - the `[]` operator.,operatorarray__key_type_const} - - @sa @ref at(const typename object_t::key_type&) for access by reference - with range checking - @sa @ref value() for access by value with a default value - - @since version 1.0.0 - */ - const_reference operator[](const typename object_t::key_type& key) const - { - // const operator[] only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - JSON_ASSERT(m_value.object->find(key) != m_value.object->end()); - return m_value.object->find(key)->second; - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); - } - - /*! - @brief access specified object element - - Returns a reference to the element at with specified key @a key. - - @note If @a key is not found in the object, then it is silently added to - the object and filled with a `null` value to make `key` a valid reference. - In case the value was `null` before, it is converted to an object. - - @param[in] key key of the element to access - - @return reference to the element at key @a key - - @throw type_error.305 if the JSON value is not an object or null; in that - cases, using the [] operator with a key makes no sense. - - @complexity Logarithmic in the size of the container. 
- - @liveexample{The example below shows how object elements can be read and - written using the `[]` operator.,operatorarray__key_type} - - @sa @ref at(const typename object_t::key_type&) for access by reference - with range checking - @sa @ref value() for access by value with a default value - - @since version 1.1.0 - */ - template - JSON_HEDLEY_NON_NULL(2) - reference operator[](T* key) - { - // implicitly convert null to object - if (is_null()) - { - m_type = value_t::object; - m_value = value_t::object; - assert_invariant(); - } - - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - return set_parent(m_value.object->operator[](key)); - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); - } - - /*! - @brief read-only access specified object element - - Returns a const reference to the element at with specified key @a key. No - bounds checking is performed. - - @warning If the element with key @a key does not exist, the behavior is - undefined. - - @param[in] key key of the element to access - - @return const reference to the element at key @a key - - @pre The element with key @a key must exist. **This precondition is - enforced with an assertion.** - - @throw type_error.305 if the JSON value is not an object; in that case, - using the [] operator with a key makes no sense. - - @complexity Logarithmic in the size of the container. 
- - @liveexample{The example below shows how object elements can be read using - the `[]` operator.,operatorarray__key_type_const} - - @sa @ref at(const typename object_t::key_type&) for access by reference - with range checking - @sa @ref value() for access by value with a default value - - @since version 1.1.0 - */ - template - JSON_HEDLEY_NON_NULL(2) - const_reference operator[](T* key) const - { - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - JSON_ASSERT(m_value.object->find(key) != m_value.object->end()); - return m_value.object->find(key)->second; - } - - JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name()), *this)); - } - - /*! - @brief access specified object element with default value - - Returns either a copy of an object's element at the specified key @a key - or a given default value if no element with key @a key exists. - - The function is basically equivalent to executing - @code {.cpp} - try { - return at(key); - } catch(out_of_range) { - return default_value; - } - @endcode - - @note Unlike @ref at(const typename object_t::key_type&), this function - does not throw if the given key @a key was not found. - - @note Unlike @ref operator[](const typename object_t::key_type& key), this - function does not implicitly add an element to the position defined by @a - key. This function is furthermore also applicable to const objects. - - @param[in] key key of the element to access - @param[in] default_value the value to return if @a key is not found - - @tparam ValueType type compatible to JSON values, for instance `int` for - JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for - JSON arrays. Note the type of the expected value at @a key and the default - value @a default_value must be compatible. 
- - @return copy of the element at key @a key or @a default_value if @a key - is not found - - @throw type_error.302 if @a default_value does not match the type of the - value at @a key - @throw type_error.306 if the JSON value is not an object; in that case, - using `value()` with a key makes no sense. - - @complexity Logarithmic in the size of the container. - - @liveexample{The example below shows how object elements can be queried - with a default value.,basic_json__value} - - @sa @ref at(const typename object_t::key_type&) for access by reference - with range checking - @sa @ref operator[](const typename object_t::key_type&) for unchecked - access by reference - - @since version 1.0.0 - */ - // using std::is_convertible in a std::enable_if will fail when using explicit conversions - template < class ValueType, typename std::enable_if < - detail::is_getable::value - && !std::is_same::value, int >::type = 0 > - ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const - { - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - // if key is found, return value and given default value otherwise - const auto it = find(key); - if (it != end()) - { - return it->template get(); - } - - return default_value; - } - - JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()), *this)); - } - - /*! - @brief overload for a default value of type const char* - @copydoc basic_json::value(const typename object_t::key_type&, const ValueType&) const - */ - string_t value(const typename object_t::key_type& key, const char* default_value) const - { - return value(key, string_t(default_value)); - } - - /*! - @brief access specified object element via JSON Pointer with default value - - Returns either a copy of an object's element at the specified key @a key - or a given default value if no element with key @a key exists. 
- - The function is basically equivalent to executing - @code {.cpp} - try { - return at(ptr); - } catch(out_of_range) { - return default_value; - } - @endcode - - @note Unlike @ref at(const json_pointer&), this function does not throw - if the given key @a key was not found. - - @param[in] ptr a JSON pointer to the element to access - @param[in] default_value the value to return if @a ptr found no value - - @tparam ValueType type compatible to JSON values, for instance `int` for - JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for - JSON arrays. Note the type of the expected value at @a key and the default - value @a default_value must be compatible. - - @return copy of the element at key @a key or @a default_value if @a key - is not found - - @throw type_error.302 if @a default_value does not match the type of the - value at @a ptr - @throw type_error.306 if the JSON value is not an object; in that case, - using `value()` with a key makes no sense. - - @complexity Logarithmic in the size of the container. - - @liveexample{The example below shows how object elements can be queried - with a default value.,basic_json__value_ptr} - - @sa @ref operator[](const json_pointer&) for unchecked access by reference - - @since version 2.0.2 - */ - template::value, int>::type = 0> - ValueType value(const json_pointer& ptr, const ValueType& default_value) const - { - // at only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - // if pointer resolves a value, return it or use default value - JSON_TRY - { - return ptr.get_checked(this).template get(); - } - JSON_INTERNAL_CATCH (out_of_range&) - { - return default_value; - } - } - - JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name()), *this)); - } - - /*! 
- @brief overload for a default value of type const char* - @copydoc basic_json::value(const json_pointer&, ValueType) const - */ - JSON_HEDLEY_NON_NULL(3) - string_t value(const json_pointer& ptr, const char* default_value) const - { - return value(ptr, string_t(default_value)); - } - - /*! - @brief access the first element - - Returns a reference to the first element in the container. For a JSON - container `c`, the expression `c.front()` is equivalent to `*c.begin()`. - - @return In case of a structured type (array or object), a reference to the - first element is returned. In case of number, string, boolean, or binary - values, a reference to the value is returned. - - @complexity Constant. - - @pre The JSON value must not be `null` (would throw `std::out_of_range`) - or an empty array or object (undefined behavior, **guarded by - assertions**). - @post The JSON value remains unchanged. - - @throw invalid_iterator.214 when called on `null` value - - @liveexample{The following code shows an example for `front()`.,front} - - @sa @ref back() -- access the last element - - @since version 1.0.0 - */ - reference front() - { - return *begin(); - } - - /*! - @copydoc basic_json::front() - */ - const_reference front() const - { - return *cbegin(); - } - - /*! - @brief access the last element - - Returns a reference to the last element in the container. For a JSON - container `c`, the expression `c.back()` is equivalent to - @code {.cpp} - auto tmp = c.end(); - --tmp; - return *tmp; - @endcode - - @return In case of a structured type (array or object), a reference to the - last element is returned. In case of number, string, boolean, or binary - values, a reference to the value is returned. - - @complexity Constant. - - @pre The JSON value must not be `null` (would throw `std::out_of_range`) - or an empty array or object (undefined behavior, **guarded by - assertions**). - @post The JSON value remains unchanged. 
- - @throw invalid_iterator.214 when called on a `null` value. See example - below. - - @liveexample{The following code shows an example for `back()`.,back} - - @sa @ref front() -- access the first element - - @since version 1.0.0 - */ - reference back() - { - auto tmp = end(); - --tmp; - return *tmp; - } - - /*! - @copydoc basic_json::back() - */ - const_reference back() const - { - auto tmp = cend(); - --tmp; - return *tmp; - } - - /*! - @brief remove element given an iterator - - Removes the element specified by iterator @a pos. The iterator @a pos must - be valid and dereferenceable. Thus the `end()` iterator (which is valid, - but is not dereferenceable) cannot be used as a value for @a pos. - - If called on a primitive type other than `null`, the resulting JSON value - will be `null`. - - @param[in] pos iterator to the element to remove - @return Iterator following the last removed element. If the iterator @a - pos refers to the last element, the `end()` iterator is returned. - - @tparam IteratorType an @ref iterator or @ref const_iterator - - @post Invalidates iterators and references at or after the point of the - erase, including the `end()` iterator. 
- - @throw type_error.307 if called on a `null` value; example: `"cannot use - erase() with null"` - @throw invalid_iterator.202 if called on an iterator which does not belong - to the current JSON value; example: `"iterator does not fit current - value"` - @throw invalid_iterator.205 if called on a primitive type with invalid - iterator (i.e., any iterator which is not `begin()`); example: `"iterator - out of range"` - - @complexity The complexity depends on the type: - - objects: amortized constant - - arrays: linear in distance between @a pos and the end of the container - - strings and binary: linear in the length of the member - - other types: constant - - @liveexample{The example shows the result of `erase()` for different JSON - types.,erase__IteratorType} - - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in - the given range - @sa @ref erase(const typename object_t::key_type&) -- removes the element - from an object at the given key - @sa @ref erase(const size_type) -- removes the element from an array at - the given index - - @since version 1.0.0 - */ - template < class IteratorType, typename std::enable_if < - std::is_same::value || - std::is_same::value, int >::type - = 0 > - IteratorType erase(IteratorType pos) - { - // make sure iterator fits the current value - if (JSON_HEDLEY_UNLIKELY(this != pos.m_object)) - { - JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); - } - - IteratorType result = end(); - - switch (m_type) - { - case value_t::boolean: - case value_t::number_float: - case value_t::number_integer: - case value_t::number_unsigned: - case value_t::string: - case value_t::binary: - { - if (JSON_HEDLEY_UNLIKELY(!pos.m_it.primitive_iterator.is_begin())) - { - JSON_THROW(invalid_iterator::create(205, "iterator out of range", *this)); - } - - if (is_string()) - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, m_value.string); - std::allocator_traits::deallocate(alloc, 
m_value.string, 1); - m_value.string = nullptr; - } - else if (is_binary()) - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, m_value.binary); - std::allocator_traits::deallocate(alloc, m_value.binary, 1); - m_value.binary = nullptr; - } - - m_type = value_t::null; - assert_invariant(); - break; - } - - case value_t::object: - { - result.m_it.object_iterator = m_value.object->erase(pos.m_it.object_iterator); - break; - } - - case value_t::array: - { - result.m_it.array_iterator = m_value.array->erase(pos.m_it.array_iterator); - break; - } - - default: - JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); - } - - return result; - } - - /*! - @brief remove elements given an iterator range - - Removes the element specified by the range `[first; last)`. The iterator - @a first does not need to be dereferenceable if `first == last`: erasing - an empty range is a no-op. - - If called on a primitive type other than `null`, the resulting JSON value - will be `null`. - - @param[in] first iterator to the beginning of the range to remove - @param[in] last iterator past the end of the range to remove - @return Iterator following the last removed element. If the iterator @a - second refers to the last element, the `end()` iterator is returned. - - @tparam IteratorType an @ref iterator or @ref const_iterator - - @post Invalidates iterators and references at or after the point of the - erase, including the `end()` iterator. 
- - @throw type_error.307 if called on a `null` value; example: `"cannot use - erase() with null"` - @throw invalid_iterator.203 if called on iterators which does not belong - to the current JSON value; example: `"iterators do not fit current value"` - @throw invalid_iterator.204 if called on a primitive type with invalid - iterators (i.e., if `first != begin()` and `last != end()`); example: - `"iterators out of range"` - - @complexity The complexity depends on the type: - - objects: `log(size()) + std::distance(first, last)` - - arrays: linear in the distance between @a first and @a last, plus linear - in the distance between @a last and end of the container - - strings and binary: linear in the length of the member - - other types: constant - - @liveexample{The example shows the result of `erase()` for different JSON - types.,erase__IteratorType_IteratorType} - - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(const typename object_t::key_type&) -- removes the element - from an object at the given key - @sa @ref erase(const size_type) -- removes the element from an array at - the given index - - @since version 1.0.0 - */ - template < class IteratorType, typename std::enable_if < - std::is_same::value || - std::is_same::value, int >::type - = 0 > - IteratorType erase(IteratorType first, IteratorType last) - { - // make sure iterator fits the current value - if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object)) - { - JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value", *this)); - } - - IteratorType result = end(); - - switch (m_type) - { - case value_t::boolean: - case value_t::number_float: - case value_t::number_integer: - case value_t::number_unsigned: - case value_t::string: - case value_t::binary: - { - if (JSON_HEDLEY_LIKELY(!first.m_it.primitive_iterator.is_begin() - || !last.m_it.primitive_iterator.is_end())) - { - JSON_THROW(invalid_iterator::create(204, "iterators out 
of range", *this)); - } - - if (is_string()) - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, m_value.string); - std::allocator_traits::deallocate(alloc, m_value.string, 1); - m_value.string = nullptr; - } - else if (is_binary()) - { - AllocatorType alloc; - std::allocator_traits::destroy(alloc, m_value.binary); - std::allocator_traits::deallocate(alloc, m_value.binary, 1); - m_value.binary = nullptr; - } - - m_type = value_t::null; - assert_invariant(); - break; - } - - case value_t::object: - { - result.m_it.object_iterator = m_value.object->erase(first.m_it.object_iterator, - last.m_it.object_iterator); - break; - } - - case value_t::array: - { - result.m_it.array_iterator = m_value.array->erase(first.m_it.array_iterator, - last.m_it.array_iterator); - break; - } - - default: - JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); - } - - return result; - } - - /*! - @brief remove element from a JSON object given a key - - Removes elements from a JSON object with the key value @a key. - - @param[in] key value of the elements to remove - - @return Number of elements removed. If @a ObjectType is the default - `std::map` type, the return value will always be `0` (@a key was not - found) or `1` (@a key was found). - - @post References and iterators to the erased elements are invalidated. - Other references and iterators are not affected. 
- - @throw type_error.307 when called on a type other than JSON object; - example: `"cannot use erase() with null"` - - @complexity `log(size()) + count(key)` - - @liveexample{The example shows the effect of `erase()`.,erase__key_type} - - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in - the given range - @sa @ref erase(const size_type) -- removes the element from an array at - the given index - - @since version 1.0.0 - */ - size_type erase(const typename object_t::key_type& key) - { - // this erase only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - return m_value.object->erase(key); - } - - JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); - } - - /*! - @brief remove element from a JSON array given an index - - Removes element from a JSON array at the index @a idx. - - @param[in] idx index of the element to remove - - @throw type_error.307 when called on a type other than JSON object; - example: `"cannot use erase() with null"` - @throw out_of_range.401 when `idx >= size()`; example: `"array index 17 - is out of range"` - - @complexity Linear in distance between @a idx and the end of the container. 
- - @liveexample{The example shows the effect of `erase()`.,erase__size_type} - - @sa @ref erase(IteratorType) -- removes the element at a given position - @sa @ref erase(IteratorType, IteratorType) -- removes the elements in - the given range - @sa @ref erase(const typename object_t::key_type&) -- removes the element - from an object at the given key - - @since version 1.0.0 - */ - void erase(const size_type idx) - { - // this erase only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - if (JSON_HEDLEY_UNLIKELY(idx >= size())) - { - JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", *this)); - } - - m_value.array->erase(m_value.array->begin() + static_cast(idx)); - } - else - { - JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name()), *this)); - } - } - - /// @} - - - //////////// - // lookup // - //////////// - - /// @name lookup - /// @{ - - /*! - @brief find an element in a JSON object - - Finds an element in a JSON object with key equivalent to @a key. If the - element is not found or the JSON value is not an object, end() is - returned. - - @note This method always returns @ref end() when executed on a JSON type - that is not an object. - - @param[in] key key value of the element to search for. - - @return Iterator to an element with key equivalent to @a key. If no such - element is found or the JSON value is not an object, past-the-end (see - @ref end()) iterator is returned. - - @complexity Logarithmic in the size of the JSON object. - - @liveexample{The example shows how `find()` is used.,find__key_type} - - @sa @ref contains(KeyT&&) const -- checks whether a key exists - - @since version 1.0.0 - */ - template - iterator find(KeyT&& key) - { - auto result = end(); - - if (is_object()) - { - result.m_it.object_iterator = m_value.object->find(std::forward(key)); - } - - return result; - } - - /*! 
- @brief find an element in a JSON object - @copydoc find(KeyT&&) - */ - template - const_iterator find(KeyT&& key) const - { - auto result = cend(); - - if (is_object()) - { - result.m_it.object_iterator = m_value.object->find(std::forward(key)); - } - - return result; - } - - /*! - @brief returns the number of occurrences of a key in a JSON object - - Returns the number of elements with key @a key. If ObjectType is the - default `std::map` type, the return value will always be `0` (@a key was - not found) or `1` (@a key was found). - - @note This method always returns `0` when executed on a JSON type that is - not an object. - - @param[in] key key value of the element to count - - @return Number of elements with key @a key. If the JSON value is not an - object, the return value will be `0`. - - @complexity Logarithmic in the size of the JSON object. - - @liveexample{The example shows how `count()` is used.,count} - - @since version 1.0.0 - */ - template - size_type count(KeyT&& key) const - { - // return 0 for all nonobject types - return is_object() ? m_value.object->count(std::forward(key)) : 0; - } - - /*! - @brief check the existence of an element in a JSON object - - Check whether an element exists in a JSON object with key equivalent to - @a key. If the element is not found or the JSON value is not an object, - false is returned. - - @note This method always returns false when executed on a JSON type - that is not an object. - - @param[in] key key value to check its existence. - - @return true if an element with specified @a key exists. If no such - element with such key is found or the JSON value is not an object, - false is returned. - - @complexity Logarithmic in the size of the JSON object. 
- - @liveexample{The following code shows an example for `contains()`.,contains} - - @sa @ref find(KeyT&&) -- returns an iterator to an object element - @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer - - @since version 3.6.0 - */ - template < typename KeyT, typename std::enable_if < - !std::is_same::type, json_pointer>::value, int >::type = 0 > - bool contains(KeyT && key) const - { - return is_object() && m_value.object->find(std::forward(key)) != m_value.object->end(); - } - - /*! - @brief check the existence of an element in a JSON object given a JSON pointer - - Check whether the given JSON pointer @a ptr can be resolved in the current - JSON value. - - @note This method can be executed on any JSON value type. - - @param[in] ptr JSON pointer to check its existence. - - @return true if the JSON pointer can be resolved to a stored value, false - otherwise. - - @post If `j.contains(ptr)` returns true, it is safe to call `j[ptr]`. - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - - @complexity Logarithmic in the size of the JSON object. - - @liveexample{The following code shows an example for `contains()`.,contains_json_pointer} - - @sa @ref contains(KeyT &&) const -- checks the existence of a key - - @since version 3.7.0 - */ - bool contains(const json_pointer& ptr) const - { - return ptr.contains(this); - } - - /// @} - - - /////////////// - // iterators // - /////////////// - - /// @name iterators - /// @{ - - /*! - @brief returns an iterator to the first element - - Returns an iterator to the first element. - - @image html range-begin-end.svg "Illustration from cppreference.com" - - @return iterator to the first element - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. 
- - @liveexample{The following code shows an example for `begin()`.,begin} - - @sa @ref cbegin() -- returns a const iterator to the beginning - @sa @ref end() -- returns an iterator to the end - @sa @ref cend() -- returns a const iterator to the end - - @since version 1.0.0 - */ - iterator begin() noexcept - { - iterator result(this); - result.set_begin(); - return result; - } - - /*! - @copydoc basic_json::cbegin() - */ - const_iterator begin() const noexcept - { - return cbegin(); - } - - /*! - @brief returns a const iterator to the first element - - Returns a const iterator to the first element. - - @image html range-begin-end.svg "Illustration from cppreference.com" - - @return const iterator to the first element - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. - - Has the semantics of `const_cast(*this).begin()`. - - @liveexample{The following code shows an example for `cbegin()`.,cbegin} - - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref end() -- returns an iterator to the end - @sa @ref cend() -- returns a const iterator to the end - - @since version 1.0.0 - */ - const_iterator cbegin() const noexcept - { - const_iterator result(this); - result.set_begin(); - return result; - } - - /*! - @brief returns an iterator to one past the last element - - Returns an iterator to one past the last element. - - @image html range-begin-end.svg "Illustration from cppreference.com" - - @return iterator one past the last element - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. 
- - @liveexample{The following code shows an example for `end()`.,end} - - @sa @ref cend() -- returns a const iterator to the end - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref cbegin() -- returns a const iterator to the beginning - - @since version 1.0.0 - */ - iterator end() noexcept - { - iterator result(this); - result.set_end(); - return result; - } - - /*! - @copydoc basic_json::cend() - */ - const_iterator end() const noexcept - { - return cend(); - } - - /*! - @brief returns a const iterator to one past the last element - - Returns a const iterator to one past the last element. - - @image html range-begin-end.svg "Illustration from cppreference.com" - - @return const iterator one past the last element - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. - - Has the semantics of `const_cast(*this).end()`. - - @liveexample{The following code shows an example for `cend()`.,cend} - - @sa @ref end() -- returns an iterator to the end - @sa @ref begin() -- returns an iterator to the beginning - @sa @ref cbegin() -- returns a const iterator to the beginning - - @since version 1.0.0 - */ - const_iterator cend() const noexcept - { - const_iterator result(this); - result.set_end(); - return result; - } - - /*! - @brief returns an iterator to the reverse-beginning - - Returns an iterator to the reverse-beginning; that is, the last element. - - @image html range-rbegin-rend.svg "Illustration from cppreference.com" - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) - requirements: - - The complexity is constant. - - Has the semantics of `reverse_iterator(end())`. 
- - @liveexample{The following code shows an example for `rbegin()`.,rbegin} - - @sa @ref crbegin() -- returns a const reverse iterator to the beginning - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref crend() -- returns a const reverse iterator to the end - - @since version 1.0.0 - */ - reverse_iterator rbegin() noexcept - { - return reverse_iterator(end()); - } - - /*! - @copydoc basic_json::crbegin() - */ - const_reverse_iterator rbegin() const noexcept - { - return crbegin(); - } - - /*! - @brief returns an iterator to the reverse-end - - Returns an iterator to the reverse-end; that is, one before the first - element. - - @image html range-rbegin-rend.svg "Illustration from cppreference.com" - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) - requirements: - - The complexity is constant. - - Has the semantics of `reverse_iterator(begin())`. - - @liveexample{The following code shows an example for `rend()`.,rend} - - @sa @ref crend() -- returns a const reverse iterator to the end - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref crbegin() -- returns a const reverse iterator to the beginning - - @since version 1.0.0 - */ - reverse_iterator rend() noexcept - { - return reverse_iterator(begin()); - } - - /*! - @copydoc basic_json::crend() - */ - const_reverse_iterator rend() const noexcept - { - return crend(); - } - - /*! - @brief returns a const reverse iterator to the last element - - Returns a const iterator to the reverse-beginning; that is, the last - element. - - @image html range-rbegin-rend.svg "Illustration from cppreference.com" - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) - requirements: - - The complexity is constant. 
- - Has the semantics of `const_cast(*this).rbegin()`. - - @liveexample{The following code shows an example for `crbegin()`.,crbegin} - - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref crend() -- returns a const reverse iterator to the end - - @since version 1.0.0 - */ - const_reverse_iterator crbegin() const noexcept - { - return const_reverse_iterator(cend()); - } - - /*! - @brief returns a const reverse iterator to one before the first - - Returns a const reverse iterator to the reverse-end; that is, one before - the first element. - - @image html range-rbegin-rend.svg "Illustration from cppreference.com" - - @complexity Constant. - - @requirement This function helps `basic_json` satisfying the - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer) - requirements: - - The complexity is constant. - - Has the semantics of `const_cast(*this).rend()`. - - @liveexample{The following code shows an example for `crend()`.,crend} - - @sa @ref rend() -- returns a reverse iterator to the end - @sa @ref rbegin() -- returns a reverse iterator to the beginning - @sa @ref crbegin() -- returns a const reverse iterator to the beginning - - @since version 1.0.0 - */ - const_reverse_iterator crend() const noexcept - { - return const_reverse_iterator(cbegin()); - } - - public: - /*! - @brief wrapper to access iterator member functions in range-based for - - This function allows to access @ref iterator::key() and @ref - iterator::value() during range-based for loops. In these loops, a - reference to the JSON values is returned, so there is no access to the - underlying iterator. 
- - For loop without iterator_wrapper: - - @code{cpp} - for (auto it = j_object.begin(); it != j_object.end(); ++it) - { - std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; - } - @endcode - - Range-based for loop without iterator proxy: - - @code{cpp} - for (auto it : j_object) - { - // "it" is of type json::reference and has no key() member - std::cout << "value: " << it << '\n'; - } - @endcode - - Range-based for loop with iterator proxy: - - @code{cpp} - for (auto it : json::iterator_wrapper(j_object)) - { - std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; - } - @endcode - - @note When iterating over an array, `key()` will return the index of the - element as string (see example). - - @param[in] ref reference to a JSON value - @return iteration proxy object wrapping @a ref with an interface to use in - range-based for loops - - @liveexample{The following code shows how the wrapper is used,iterator_wrapper} - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @note The name of this function is not yet final and may change in the - future. - - @deprecated This stream operator is deprecated and will be removed in - future 4.0.0 of the library. Please use @ref items() instead; - that is, replace `json::iterator_wrapper(j)` with `j.items()`. - */ - JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items()) - static iteration_proxy iterator_wrapper(reference ref) noexcept - { - return ref.items(); - } - - /*! - @copydoc iterator_wrapper(reference) - */ - JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items()) - static iteration_proxy iterator_wrapper(const_reference ref) noexcept - { - return ref.items(); - } - - /*! - @brief helper to access iterator member functions in range-based for - - This function allows to access @ref iterator::key() and @ref - iterator::value() during range-based for loops. 
In these loops, a - reference to the JSON values is returned, so there is no access to the - underlying iterator. - - For loop without `items()` function: - - @code{cpp} - for (auto it = j_object.begin(); it != j_object.end(); ++it) - { - std::cout << "key: " << it.key() << ", value:" << it.value() << '\n'; - } - @endcode - - Range-based for loop without `items()` function: - - @code{cpp} - for (auto it : j_object) - { - // "it" is of type json::reference and has no key() member - std::cout << "value: " << it << '\n'; - } - @endcode - - Range-based for loop with `items()` function: - - @code{cpp} - for (auto& el : j_object.items()) - { - std::cout << "key: " << el.key() << ", value:" << el.value() << '\n'; - } - @endcode - - The `items()` function also allows to use - [structured bindings](https://en.cppreference.com/w/cpp/language/structured_binding) - (C++17): - - @code{cpp} - for (auto& [key, val] : j_object.items()) - { - std::cout << "key: " << key << ", value:" << val << '\n'; - } - @endcode - - @note When iterating over an array, `key()` will return the index of the - element as string (see example). For primitive types (e.g., numbers), - `key()` returns an empty string. - - @warning Using `items()` on temporary objects is dangerous. Make sure the - object's lifetime exeeds the iteration. See - for more - information. - - @return iteration proxy object wrapping @a ref with an interface to use in - range-based for loops - - @liveexample{The following code shows how the function is used.,items} - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @since version 3.1.0, structured bindings support since 3.5.0. - */ - iteration_proxy items() noexcept - { - return iteration_proxy(*this); - } - - /*! 
- @copydoc items() - */ - iteration_proxy items() const noexcept - { - return iteration_proxy(*this); - } - - /// @} - - - ////////////// - // capacity // - ////////////// - - /// @name capacity - /// @{ - - /*! - @brief checks whether the container is empty. - - Checks if a JSON value has no elements (i.e. whether its @ref size is `0`). - - @return The return value depends on the different types and is - defined as follows: - Value type | return value - ----------- | ------------- - null | `true` - boolean | `false` - string | `false` - number | `false` - binary | `false` - object | result of function `object_t::empty()` - array | result of function `array_t::empty()` - - @liveexample{The following code uses `empty()` to check if a JSON - object contains any elements.,empty} - - @complexity Constant, as long as @ref array_t and @ref object_t satisfy - the Container concept; that is, their `empty()` functions have constant - complexity. - - @iterators No changes. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @note This function does not return whether a string stored as JSON value - is empty - it returns whether the JSON container itself is empty which is - false in the case of a string. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. - - Has the semantics of `begin() == end()`. - - @sa @ref size() -- returns the number of elements - - @since version 1.0.0 - */ - bool empty() const noexcept - { - switch (m_type) - { - case value_t::null: - { - // null values are empty - return true; - } - - case value_t::array: - { - // delegate call to array_t::empty() - return m_value.array->empty(); - } - - case value_t::object: - { - // delegate call to object_t::empty() - return m_value.object->empty(); - } - - default: - { - // all other types are nonempty - return false; - } - } - } - - /*! 
- @brief returns the number of elements - - Returns the number of elements in a JSON value. - - @return The return value depends on the different types and is - defined as follows: - Value type | return value - ----------- | ------------- - null | `0` - boolean | `1` - string | `1` - number | `1` - binary | `1` - object | result of function object_t::size() - array | result of function array_t::size() - - @liveexample{The following code calls `size()` on the different value - types.,size} - - @complexity Constant, as long as @ref array_t and @ref object_t satisfy - the Container concept; that is, their size() functions have constant - complexity. - - @iterators No changes. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @note This function does not return the length of a string stored as JSON - value - it returns the number of elements in the JSON value which is 1 in - the case of a string. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. - - Has the semantics of `std::distance(begin(), end())`. - - @sa @ref empty() -- checks whether the container is empty - @sa @ref max_size() -- returns the maximal number of elements - - @since version 1.0.0 - */ - size_type size() const noexcept - { - switch (m_type) - { - case value_t::null: - { - // null values are empty - return 0; - } - - case value_t::array: - { - // delegate call to array_t::size() - return m_value.array->size(); - } - - case value_t::object: - { - // delegate call to object_t::size() - return m_value.object->size(); - } - - default: - { - // all other types have size 1 - return 1; - } - } - } - - /*! - @brief returns the maximum possible number of elements - - Returns the maximum number of elements a JSON value is able to hold due to - system or library implementation limitations, i.e. `std::distance(begin(), - end())` for the JSON value. 
- - @return The return value depends on the different types and is - defined as follows: - Value type | return value - ----------- | ------------- - null | `0` (same as `size()`) - boolean | `1` (same as `size()`) - string | `1` (same as `size()`) - number | `1` (same as `size()`) - binary | `1` (same as `size()`) - object | result of function `object_t::max_size()` - array | result of function `array_t::max_size()` - - @liveexample{The following code calls `max_size()` on the different value - types. Note the output is implementation specific.,max_size} - - @complexity Constant, as long as @ref array_t and @ref object_t satisfy - the Container concept; that is, their `max_size()` functions have constant - complexity. - - @iterators No changes. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @requirement This function helps `basic_json` satisfying the - [Container](https://en.cppreference.com/w/cpp/named_req/Container) - requirements: - - The complexity is constant. - - Has the semantics of returning `b.size()` where `b` is the largest - possible JSON value. - - @sa @ref size() -- returns the number of elements - - @since version 1.0.0 - */ - size_type max_size() const noexcept - { - switch (m_type) - { - case value_t::array: - { - // delegate call to array_t::max_size() - return m_value.array->max_size(); - } - - case value_t::object: - { - // delegate call to object_t::max_size() - return m_value.object->max_size(); - } - - default: - { - // all other types have max_size() == size() - return size(); - } - } - } - - /// @} - - - /////////////// - // modifiers // - /////////////// - - /// @name modifiers - /// @{ - - /*! 
- @brief clears the contents - - Clears the content of a JSON value and resets it to the default value as - if @ref basic_json(value_t) would have been called with the current value - type from @ref type(): - - Value type | initial value - ----------- | ------------- - null | `null` - boolean | `false` - string | `""` - number | `0` - binary | An empty byte vector - object | `{}` - array | `[]` - - @post Has the same effect as calling - @code {.cpp} - *this = basic_json(type()); - @endcode - - @liveexample{The example below shows the effect of `clear()` to different - JSON types.,clear} - - @complexity Linear in the size of the JSON value. - - @iterators All iterators, pointers and references related to this container - are invalidated. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @sa @ref basic_json(value_t) -- constructor that creates an object with the - same value than calling `clear()` - - @since version 1.0.0 - */ - void clear() noexcept - { - switch (m_type) - { - case value_t::number_integer: - { - m_value.number_integer = 0; - break; - } - - case value_t::number_unsigned: - { - m_value.number_unsigned = 0; - break; - } - - case value_t::number_float: - { - m_value.number_float = 0.0; - break; - } - - case value_t::boolean: - { - m_value.boolean = false; - break; - } - - case value_t::string: - { - m_value.string->clear(); - break; - } - - case value_t::binary: - { - m_value.binary->clear(); - break; - } - - case value_t::array: - { - m_value.array->clear(); - break; - } - - case value_t::object: - { - m_value.object->clear(); - break; - } - - default: - break; - } - } - - /*! - @brief add an object to an array - - Appends the given element @a val to the end of the JSON value. If the - function is called on a JSON null value, an empty array is created before - appending @a val. 
- - @param[in] val the value to add to the JSON array - - @throw type_error.308 when called on a type other than JSON array or - null; example: `"cannot use push_back() with number"` - - @complexity Amortized constant. - - @liveexample{The example shows how `push_back()` and `+=` can be used to - add elements to a JSON array. Note how the `null` value was silently - converted to a JSON array.,push_back} - - @since version 1.0.0 - */ - void push_back(basic_json&& val) - { - // push_back only works for null objects or arrays - if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) - { - JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); - } - - // transform null object into an array - if (is_null()) - { - m_type = value_t::array; - m_value = value_t::array; - assert_invariant(); - } - - // add element to array (move semantics) - m_value.array->push_back(std::move(val)); - set_parent(m_value.array->back()); - // if val is moved from, basic_json move constructor marks it null so we do not call the destructor - } - - /*! - @brief add an object to an array - @copydoc push_back(basic_json&&) - */ - reference operator+=(basic_json&& val) - { - push_back(std::move(val)); - return *this; - } - - /*! - @brief add an object to an array - @copydoc push_back(basic_json&&) - */ - void push_back(const basic_json& val) - { - // push_back only works for null objects or arrays - if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) - { - JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); - } - - // transform null object into an array - if (is_null()) - { - m_type = value_t::array; - m_value = value_t::array; - assert_invariant(); - } - - // add element to array - m_value.array->push_back(val); - set_parent(m_value.array->back()); - } - - /*! 
- @brief add an object to an array - @copydoc push_back(basic_json&&) - */ - reference operator+=(const basic_json& val) - { - push_back(val); - return *this; - } - - /*! - @brief add an object to an object - - Inserts the given element @a val to the JSON object. If the function is - called on a JSON null value, an empty object is created before inserting - @a val. - - @param[in] val the value to add to the JSON object - - @throw type_error.308 when called on a type other than JSON object or - null; example: `"cannot use push_back() with number"` - - @complexity Logarithmic in the size of the container, O(log(`size()`)). - - @liveexample{The example shows how `push_back()` and `+=` can be used to - add elements to a JSON object. Note how the `null` value was silently - converted to a JSON object.,push_back__object_t__value} - - @since version 1.0.0 - */ - void push_back(const typename object_t::value_type& val) - { - // push_back only works for null objects or objects - if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object()))) - { - JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name()), *this)); - } - - // transform null object into an object - if (is_null()) - { - m_type = value_t::object; - m_value = value_t::object; - assert_invariant(); - } - - // add element to object - auto res = m_value.object->insert(val); - set_parent(res.first->second); - } - - /*! - @brief add an object to an object - @copydoc push_back(const typename object_t::value_type&) - */ - reference operator+=(const typename object_t::value_type& val) - { - push_back(val); - return *this; - } - - /*! - @brief add an object to an object - - This function allows to use `push_back` with an initializer list. In case - - 1. the current value is an object, - 2. the initializer list @a init contains only two elements, and - 3. 
the first element of @a init is a string, - - @a init is converted into an object element and added using - @ref push_back(const typename object_t::value_type&). Otherwise, @a init - is converted to a JSON value and added using @ref push_back(basic_json&&). - - @param[in] init an initializer list - - @complexity Linear in the size of the initializer list @a init. - - @note This function is required to resolve an ambiguous overload error, - because pairs like `{"key", "value"}` can be both interpreted as - `object_t::value_type` or `std::initializer_list`, see - https://github.com/nlohmann/json/issues/235 for more information. - - @liveexample{The example shows how initializer lists are treated as - objects when possible.,push_back__initializer_list} - */ - void push_back(initializer_list_t init) - { - if (is_object() && init.size() == 2 && (*init.begin())->is_string()) - { - basic_json&& key = init.begin()->moved_or_copied(); - push_back(typename object_t::value_type( - std::move(key.get_ref()), (init.begin() + 1)->moved_or_copied())); - } - else - { - push_back(basic_json(init)); - } - } - - /*! - @brief add an object to an object - @copydoc push_back(initializer_list_t) - */ - reference operator+=(initializer_list_t init) - { - push_back(init); - return *this; - } - - /*! - @brief add an object to an array - - Creates a JSON value from the passed parameters @a args to the end of the - JSON value. If the function is called on a JSON null value, an empty array - is created before appending the value created from @a args. - - @param[in] args arguments to forward to a constructor of @ref basic_json - @tparam Args compatible types to create a @ref basic_json object - - @return reference to the inserted element - - @throw type_error.311 when called on a type other than JSON array or - null; example: `"cannot use emplace_back() with number"` - - @complexity Amortized constant. 
- - @liveexample{The example shows how `push_back()` can be used to add - elements to a JSON array. Note how the `null` value was silently converted - to a JSON array.,emplace_back} - - @since version 2.0.8, returns reference since 3.7.0 - */ - template - reference emplace_back(Args&& ... args) - { - // emplace_back only works for null objects or arrays - if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array()))) - { - JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name()), *this)); - } - - // transform null object into an array - if (is_null()) - { - m_type = value_t::array; - m_value = value_t::array; - assert_invariant(); - } - - // add element to array (perfect forwarding) -#ifdef JSON_HAS_CPP_17 - return set_parent(m_value.array->emplace_back(std::forward(args)...)); -#else - m_value.array->emplace_back(std::forward(args)...); - return set_parent(m_value.array->back()); -#endif - } - - /*! - @brief add an object to an object if key does not exist - - Inserts a new element into a JSON object constructed in-place with the - given @a args if there is no element with the key in the container. If the - function is called on a JSON null value, an empty object is created before - appending the value created from @a args. - - @param[in] args arguments to forward to a constructor of @ref basic_json - @tparam Args compatible types to create a @ref basic_json object - - @return a pair consisting of an iterator to the inserted element, or the - already-existing element if no insertion happened, and a bool - denoting whether the insertion took place. - - @throw type_error.311 when called on a type other than JSON object or - null; example: `"cannot use emplace() with number"` - - @complexity Logarithmic in the size of the container, O(log(`size()`)). - - @liveexample{The example shows how `emplace()` can be used to add elements - to a JSON object. Note how the `null` value was silently converted to a - JSON object. 
Further note how no value is added if there was already one - value stored with the same key.,emplace} - - @since version 2.0.8 - */ - template - std::pair emplace(Args&& ... args) - { - // emplace only works for null objects or arrays - if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object()))) - { - JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name()), *this)); - } - - // transform null object into an object - if (is_null()) - { - m_type = value_t::object; - m_value = value_t::object; - assert_invariant(); - } - - // add element to array (perfect forwarding) - auto res = m_value.object->emplace(std::forward(args)...); - set_parent(res.first->second); - - // create result iterator and set iterator to the result of emplace - auto it = begin(); - it.m_it.object_iterator = res.first; - - // return pair of iterator and boolean - return {it, res.second}; - } - - /// Helper for insertion of an iterator - /// @note: This uses std::distance to support GCC 4.8, - /// see https://github.com/nlohmann/json/pull/1257 - template - iterator insert_iterator(const_iterator pos, Args&& ... args) - { - iterator result(this); - JSON_ASSERT(m_value.array != nullptr); - - auto insert_pos = std::distance(m_value.array->begin(), pos.m_it.array_iterator); - m_value.array->insert(pos.m_it.array_iterator, std::forward(args)...); - result.m_it.array_iterator = m_value.array->begin() + insert_pos; - - // This could have been written as: - // result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, cnt, val); - // but the return value of insert is missing in GCC 4.8, so it is written this way instead. - - return result; - } - - /*! - @brief inserts element - - Inserts element @a val before iterator @a pos. - - @param[in] pos iterator before which the content will be inserted; may be - the end() iterator - @param[in] val element to insert - @return iterator pointing to the inserted @a val. 
- - @throw type_error.309 if called on JSON values other than arrays; - example: `"cannot use insert() with string"` - @throw invalid_iterator.202 if @a pos is not an iterator of *this; - example: `"iterator does not fit current value"` - - @complexity Constant plus linear in the distance between @a pos and end of - the container. - - @liveexample{The example shows how `insert()` is used.,insert} - - @since version 1.0.0 - */ - iterator insert(const_iterator pos, const basic_json& val) - { - // insert only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - // check if iterator pos fits to this JSON value - if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) - { - JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); - } - - // insert to array and return iterator - return set_parents(insert_iterator(pos, val), static_cast(1)); - } - - JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); - } - - /*! - @brief inserts element - @copydoc insert(const_iterator, const basic_json&) - */ - iterator insert(const_iterator pos, basic_json&& val) - { - return insert(pos, val); - } - - /*! - @brief inserts elements - - Inserts @a cnt copies of @a val before iterator @a pos. - - @param[in] pos iterator before which the content will be inserted; may be - the end() iterator - @param[in] cnt number of copies of @a val to insert - @param[in] val element to insert - @return iterator pointing to the first element inserted, or @a pos if - `cnt==0` - - @throw type_error.309 if called on JSON values other than arrays; example: - `"cannot use insert() with string"` - @throw invalid_iterator.202 if @a pos is not an iterator of *this; - example: `"iterator does not fit current value"` - - @complexity Linear in @a cnt plus linear in the distance between @a pos - and end of the container. 
- - @liveexample{The example shows how `insert()` is used.,insert__count} - - @since version 1.0.0 - */ - iterator insert(const_iterator pos, size_type cnt, const basic_json& val) - { - // insert only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - // check if iterator pos fits to this JSON value - if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) - { - JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); - } - - // insert to array and return iterator - return set_parents(insert_iterator(pos, cnt, val), static_cast(cnt)); - } - - JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); - } - - /*! - @brief inserts elements - - Inserts elements from range `[first, last)` before iterator @a pos. - - @param[in] pos iterator before which the content will be inserted; may be - the end() iterator - @param[in] first begin of the range of elements to insert - @param[in] last end of the range of elements to insert - - @throw type_error.309 if called on JSON values other than arrays; example: - `"cannot use insert() with string"` - @throw invalid_iterator.202 if @a pos is not an iterator of *this; - example: `"iterator does not fit current value"` - @throw invalid_iterator.210 if @a first and @a last do not belong to the - same JSON value; example: `"iterators do not fit"` - @throw invalid_iterator.211 if @a first or @a last are iterators into - container for which insert is called; example: `"passed iterators may not - belong to container"` - - @return iterator pointing to the first element inserted, or @a pos if - `first==last` - - @complexity Linear in `std::distance(first, last)` plus linear in the - distance between @a pos and end of the container. 
- - @liveexample{The example shows how `insert()` is used.,insert__range} - - @since version 1.0.0 - */ - iterator insert(const_iterator pos, const_iterator first, const_iterator last) - { - // insert only works for arrays - if (JSON_HEDLEY_UNLIKELY(!is_array())) - { - JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); - } - - // check if iterator pos fits to this JSON value - if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) - { - JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); - } - - // check if range iterators belong to the same JSON object - if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) - { - JSON_THROW(invalid_iterator::create(210, "iterators do not fit", *this)); - } - - if (JSON_HEDLEY_UNLIKELY(first.m_object == this)) - { - JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container", *this)); - } - - // insert to array and return iterator - return set_parents(insert_iterator(pos, first.m_it.array_iterator, last.m_it.array_iterator), std::distance(first, last)); - } - - /*! - @brief inserts elements - - Inserts elements from initializer list @a ilist before iterator @a pos. - - @param[in] pos iterator before which the content will be inserted; may be - the end() iterator - @param[in] ilist initializer list to insert the values from - - @throw type_error.309 if called on JSON values other than arrays; example: - `"cannot use insert() with string"` - @throw invalid_iterator.202 if @a pos is not an iterator of *this; - example: `"iterator does not fit current value"` - - @return iterator pointing to the first element inserted, or @a pos if - `ilist` is empty - - @complexity Linear in `ilist.size()` plus linear in the distance between - @a pos and end of the container. 
- - @liveexample{The example shows how `insert()` is used.,insert__ilist} - - @since version 1.0.0 - */ - iterator insert(const_iterator pos, initializer_list_t ilist) - { - // insert only works for arrays - if (JSON_HEDLEY_UNLIKELY(!is_array())) - { - JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); - } - - // check if iterator pos fits to this JSON value - if (JSON_HEDLEY_UNLIKELY(pos.m_object != this)) - { - JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value", *this)); - } - - // insert to array and return iterator - return set_parents(insert_iterator(pos, ilist.begin(), ilist.end()), static_cast(ilist.size())); - } - - /*! - @brief inserts elements - - Inserts elements from range `[first, last)`. - - @param[in] first begin of the range of elements to insert - @param[in] last end of the range of elements to insert - - @throw type_error.309 if called on JSON values other than objects; example: - `"cannot use insert() with string"` - @throw invalid_iterator.202 if iterator @a first or @a last does does not - point to an object; example: `"iterators first and last must point to - objects"` - @throw invalid_iterator.210 if @a first and @a last do not belong to the - same JSON value; example: `"iterators do not fit"` - - @complexity Logarithmic: `O(N*log(size() + N))`, where `N` is the number - of elements to insert. 
- - @liveexample{The example shows how `insert()` is used.,insert__range_object} - - @since version 3.0.0 - */ - void insert(const_iterator first, const_iterator last) - { - // insert only works for objects - if (JSON_HEDLEY_UNLIKELY(!is_object())) - { - JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name()), *this)); - } - - // check if range iterators belong to the same JSON object - if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) - { - JSON_THROW(invalid_iterator::create(210, "iterators do not fit", *this)); - } - - // passed iterators must belong to objects - if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object())) - { - JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects", *this)); - } - - m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); - } - - /*! - @brief updates a JSON object from another object, overwriting existing keys - - Inserts all values from JSON object @a j and overwrites existing keys. - - @param[in] j JSON object to read values from - - @throw type_error.312 if called on JSON values other than objects; example: - `"cannot use update() with string"` - - @complexity O(N*log(size() + N)), where N is the number of elements to - insert. 
- - @liveexample{The example shows how `update()` is used.,update} - - @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update - - @since version 3.0.0 - */ - void update(const_reference j) - { - // implicitly convert null value to an empty object - if (is_null()) - { - m_type = value_t::object; - m_value.object = create(); - assert_invariant(); - } - - if (JSON_HEDLEY_UNLIKELY(!is_object())) - { - JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name()), *this)); - } - if (JSON_HEDLEY_UNLIKELY(!j.is_object())) - { - JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name()), *this)); - } - - for (auto it = j.cbegin(); it != j.cend(); ++it) - { - m_value.object->operator[](it.key()) = it.value(); - } - } - - /*! - @brief updates a JSON object from another object, overwriting existing keys - - Inserts all values from from range `[first, last)` and overwrites existing - keys. - - @param[in] first begin of the range of elements to insert - @param[in] last end of the range of elements to insert - - @throw type_error.312 if called on JSON values other than objects; example: - `"cannot use update() with string"` - @throw invalid_iterator.202 if iterator @a first or @a last does does not - point to an object; example: `"iterators first and last must point to - objects"` - @throw invalid_iterator.210 if @a first and @a last do not belong to the - same JSON value; example: `"iterators do not fit"` - - @complexity O(N*log(size() + N)), where N is the number of elements to - insert. 
- - @liveexample{The example shows how `update()` is used__range.,update} - - @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update - - @since version 3.0.0 - */ - void update(const_iterator first, const_iterator last) - { - // implicitly convert null value to an empty object - if (is_null()) - { - m_type = value_t::object; - m_value.object = create(); - assert_invariant(); - } - - if (JSON_HEDLEY_UNLIKELY(!is_object())) - { - JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name()), *this)); - } - - // check if range iterators belong to the same JSON object - if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object)) - { - JSON_THROW(invalid_iterator::create(210, "iterators do not fit", *this)); - } - - // passed iterators must belong to objects - if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object() - || !last.m_object->is_object())) - { - JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects", *this)); - } - - for (auto it = first; it != last; ++it) - { - m_value.object->operator[](it.key()) = it.value(); - } - } - - /*! - @brief exchanges the values - - Exchanges the contents of the JSON value with those of @a other. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. The past-the-end iterator is - invalidated. - - @param[in,out] other JSON value to exchange the contents with - - @complexity Constant. - - @liveexample{The example below shows how JSON values can be swapped with - `swap()`.,swap__reference} - - @since version 1.0.0 - */ - void swap(reference other) noexcept ( - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value&& - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value - ) - { - std::swap(m_type, other.m_type); - std::swap(m_value, other.m_value); - - set_parents(); - other.set_parents(); - assert_invariant(); - } - - /*! 
- @brief exchanges the values - - Exchanges the contents of the JSON value from @a left with those of @a right. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. The past-the-end iterator is - invalidated. implemented as a friend function callable via ADL. - - @param[in,out] left JSON value to exchange the contents with - @param[in,out] right JSON value to exchange the contents with - - @complexity Constant. - - @liveexample{The example below shows how JSON values can be swapped with - `swap()`.,swap__reference} - - @since version 1.0.0 - */ - friend void swap(reference left, reference right) noexcept ( - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value&& - std::is_nothrow_move_constructible::value&& - std::is_nothrow_move_assignable::value - ) - { - left.swap(right); - } - - /*! - @brief exchanges the values - - Exchanges the contents of a JSON array with those of @a other. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. The past-the-end iterator is - invalidated. - - @param[in,out] other array to exchange the contents with - - @throw type_error.310 when JSON value is not an array; example: `"cannot - use swap() with string"` - - @complexity Constant. - - @liveexample{The example below shows how arrays can be swapped with - `swap()`.,swap__array_t} - - @since version 1.0.0 - */ - void swap(array_t& other) - { - // swap only works for arrays - if (JSON_HEDLEY_LIKELY(is_array())) - { - std::swap(*(m_value.array), other); - } - else - { - JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief exchanges the values - - Exchanges the contents of a JSON object with those of @a other. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. 
The past-the-end iterator is - invalidated. - - @param[in,out] other object to exchange the contents with - - @throw type_error.310 when JSON value is not an object; example: - `"cannot use swap() with string"` - - @complexity Constant. - - @liveexample{The example below shows how objects can be swapped with - `swap()`.,swap__object_t} - - @since version 1.0.0 - */ - void swap(object_t& other) - { - // swap only works for objects - if (JSON_HEDLEY_LIKELY(is_object())) - { - std::swap(*(m_value.object), other); - } - else - { - JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief exchanges the values - - Exchanges the contents of a JSON string with those of @a other. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. The past-the-end iterator is - invalidated. - - @param[in,out] other string to exchange the contents with - - @throw type_error.310 when JSON value is not a string; example: `"cannot - use swap() with boolean"` - - @complexity Constant. - - @liveexample{The example below shows how strings can be swapped with - `swap()`.,swap__string_t} - - @since version 1.0.0 - */ - void swap(string_t& other) - { - // swap only works for strings - if (JSON_HEDLEY_LIKELY(is_string())) - { - std::swap(*(m_value.string), other); - } - else - { - JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); - } - } - - /*! - @brief exchanges the values - - Exchanges the contents of a JSON string with those of @a other. Does not - invoke any move, copy, or swap operations on individual elements. All - iterators and references remain valid. The past-the-end iterator is - invalidated. - - @param[in,out] other binary to exchange the contents with - - @throw type_error.310 when JSON value is not a string; example: `"cannot - use swap() with boolean"` - - @complexity Constant. 
- - @liveexample{The example below shows how strings can be swapped with - `swap()`.,swap__binary_t} - - @since version 3.8.0 - */ - void swap(binary_t& other) - { - // swap only works for strings - if (JSON_HEDLEY_LIKELY(is_binary())) - { - std::swap(*(m_value.binary), other); - } - else - { - JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); - } - } - - /// @copydoc swap(binary_t) - void swap(typename binary_t::container_type& other) - { - // swap only works for strings - if (JSON_HEDLEY_LIKELY(is_binary())) - { - std::swap(*(m_value.binary), other); - } - else - { - JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name()), *this)); - } - } - - /// @} - - public: - ////////////////////////////////////////// - // lexicographical comparison operators // - ////////////////////////////////////////// - - /// @name lexicographical comparison operators - /// @{ - - /*! - @brief comparison: equal - - Compares two JSON values for equality according to the following rules: - - Two JSON values are equal if (1) they are from the same type and (2) - their stored values are the same according to their respective - `operator==`. - - Integer and floating-point numbers are automatically converted before - comparison. Note that two NaN values are always treated as unequal. - - Two JSON null values are equal. - - @note Floating-point inside JSON values numbers are compared with - `json::number_float_t::operator==` which is `double::operator==` by - default. 
To compare floating-point while respecting an epsilon, an alternative - [comparison function](https://github.com/mariokonrad/marnav/blob/master/include/marnav/math/floatingpoint.hpp#L34-#L39) - could be used, for instance - @code {.cpp} - template::value, T>::type> - inline bool is_same(T a, T b, T epsilon = std::numeric_limits::epsilon()) noexcept - { - return std::abs(a - b) <= epsilon; - } - @endcode - Or you can self-defined operator equal function like this: - @code {.cpp} - bool my_equal(const_reference lhs, const_reference rhs) { - const auto lhs_type lhs.type(); - const auto rhs_type rhs.type(); - if (lhs_type == rhs_type) { - switch(lhs_type) - // self_defined case - case value_t::number_float: - return std::abs(lhs - rhs) <= std::numeric_limits::epsilon(); - // other cases remain the same with the original - ... - } - ... - } - @endcode - - @note NaN values never compare equal to themselves or to other NaN values. - - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether the values @a lhs and @a rhs are equal - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @complexity Linear. 
- - @liveexample{The example demonstrates comparing several JSON - types.,operator__equal} - - @since version 1.0.0 - */ - friend bool operator==(const_reference lhs, const_reference rhs) noexcept - { - const auto lhs_type = lhs.type(); - const auto rhs_type = rhs.type(); - - if (lhs_type == rhs_type) - { - switch (lhs_type) - { - case value_t::array: - return *lhs.m_value.array == *rhs.m_value.array; - - case value_t::object: - return *lhs.m_value.object == *rhs.m_value.object; - - case value_t::null: - return true; - - case value_t::string: - return *lhs.m_value.string == *rhs.m_value.string; - - case value_t::boolean: - return lhs.m_value.boolean == rhs.m_value.boolean; - - case value_t::number_integer: - return lhs.m_value.number_integer == rhs.m_value.number_integer; - - case value_t::number_unsigned: - return lhs.m_value.number_unsigned == rhs.m_value.number_unsigned; - - case value_t::number_float: - return lhs.m_value.number_float == rhs.m_value.number_float; - - case value_t::binary: - return *lhs.m_value.binary == *rhs.m_value.binary; - - default: - return false; - } - } - else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float) - { - return static_cast(lhs.m_value.number_integer) == rhs.m_value.number_float; - } - else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer) - { - return lhs.m_value.number_float == static_cast(rhs.m_value.number_integer); - } - else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float) - { - return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_float; - } - else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned) - { - return lhs.m_value.number_float == static_cast(rhs.m_value.number_unsigned); - } - else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer) - { - return static_cast(lhs.m_value.number_unsigned) == rhs.m_value.number_integer; - } - else if (lhs_type == 
value_t::number_integer && rhs_type == value_t::number_unsigned) - { - return lhs.m_value.number_integer == static_cast(rhs.m_value.number_unsigned); - } - - return false; - } - - /*! - @brief comparison: equal - @copydoc operator==(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs == basic_json(rhs); - } - - /*! - @brief comparison: equal - @copydoc operator==(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) == rhs; - } - - /*! - @brief comparison: not equal - - Compares two JSON values for inequality by calculating `not (lhs == rhs)`. - - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether the values @a lhs and @a rhs are not equal - - @complexity Linear. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example demonstrates comparing several JSON - types.,operator__notequal} - - @since version 1.0.0 - */ - friend bool operator!=(const_reference lhs, const_reference rhs) noexcept - { - return !(lhs == rhs); - } - - /*! - @brief comparison: not equal - @copydoc operator!=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs != basic_json(rhs); - } - - /*! - @brief comparison: not equal - @copydoc operator!=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) != rhs; - } - - /*! 
- @brief comparison: less than - - Compares whether one JSON value @a lhs is less than another JSON value @a - rhs according to the following rules: - - If @a lhs and @a rhs have the same type, the values are compared using - the default `<` operator. - - Integer and floating-point numbers are automatically converted before - comparison - - In case @a lhs and @a rhs have different types, the values are ignored - and the order of the types is considered, see - @ref operator<(const value_t, const value_t). - - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether @a lhs is less than @a rhs - - @complexity Linear. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example demonstrates comparing several JSON - types.,operator__less} - - @since version 1.0.0 - */ - friend bool operator<(const_reference lhs, const_reference rhs) noexcept - { - const auto lhs_type = lhs.type(); - const auto rhs_type = rhs.type(); - - if (lhs_type == rhs_type) - { - switch (lhs_type) - { - case value_t::array: - // note parentheses are necessary, see - // https://github.com/nlohmann/json/issues/1530 - return (*lhs.m_value.array) < (*rhs.m_value.array); - - case value_t::object: - return (*lhs.m_value.object) < (*rhs.m_value.object); - - case value_t::null: - return false; - - case value_t::string: - return (*lhs.m_value.string) < (*rhs.m_value.string); - - case value_t::boolean: - return (lhs.m_value.boolean) < (rhs.m_value.boolean); - - case value_t::number_integer: - return (lhs.m_value.number_integer) < (rhs.m_value.number_integer); - - case value_t::number_unsigned: - return (lhs.m_value.number_unsigned) < (rhs.m_value.number_unsigned); - - case value_t::number_float: - return (lhs.m_value.number_float) < (rhs.m_value.number_float); - - case value_t::binary: - return (*lhs.m_value.binary) < (*rhs.m_value.binary); - - default: - return false; - } - } - else if (lhs_type == 
value_t::number_integer && rhs_type == value_t::number_float) - { - return static_cast(lhs.m_value.number_integer) < rhs.m_value.number_float; - } - else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer) - { - return lhs.m_value.number_float < static_cast(rhs.m_value.number_integer); - } - else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float) - { - return static_cast(lhs.m_value.number_unsigned) < rhs.m_value.number_float; - } - else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned) - { - return lhs.m_value.number_float < static_cast(rhs.m_value.number_unsigned); - } - else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned) - { - return lhs.m_value.number_integer < static_cast(rhs.m_value.number_unsigned); - } - else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer) - { - return static_cast(lhs.m_value.number_unsigned) < rhs.m_value.number_integer; - } - - // We only reach this line if we cannot compare values. In that case, - // we compare types. Note we have to call the operator explicitly, - // because MSVC has problems otherwise. - return operator<(lhs_type, rhs_type); - } - - /*! - @brief comparison: less than - @copydoc operator<(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs < basic_json(rhs); - } - - /*! - @brief comparison: less than - @copydoc operator<(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) < rhs; - } - - /*! - @brief comparison: less than or equal - - Compares whether one JSON value @a lhs is less than or equal to another - JSON value by calculating `not (rhs < lhs)`. 
- - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether @a lhs is less than or equal to @a rhs - - @complexity Linear. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example demonstrates comparing several JSON - types.,operator__greater} - - @since version 1.0.0 - */ - friend bool operator<=(const_reference lhs, const_reference rhs) noexcept - { - return !(rhs < lhs); - } - - /*! - @brief comparison: less than or equal - @copydoc operator<=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs <= basic_json(rhs); - } - - /*! - @brief comparison: less than or equal - @copydoc operator<=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) <= rhs; - } - - /*! - @brief comparison: greater than - - Compares whether one JSON value @a lhs is greater than another - JSON value by calculating `not (lhs <= rhs)`. - - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether @a lhs is greater than to @a rhs - - @complexity Linear. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example demonstrates comparing several JSON - types.,operator__lessequal} - - @since version 1.0.0 - */ - friend bool operator>(const_reference lhs, const_reference rhs) noexcept - { - return !(lhs <= rhs); - } - - /*! - @brief comparison: greater than - @copydoc operator>(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs > basic_json(rhs); - } - - /*! 
- @brief comparison: greater than - @copydoc operator>(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) > rhs; - } - - /*! - @brief comparison: greater than or equal - - Compares whether one JSON value @a lhs is greater than or equal to another - JSON value by calculating `not (lhs < rhs)`. - - @param[in] lhs first JSON value to consider - @param[in] rhs second JSON value to consider - @return whether @a lhs is greater than or equal to @a rhs - - @complexity Linear. - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @liveexample{The example demonstrates comparing several JSON - types.,operator__greaterequal} - - @since version 1.0.0 - */ - friend bool operator>=(const_reference lhs, const_reference rhs) noexcept - { - return !(lhs < rhs); - } - - /*! - @brief comparison: greater than or equal - @copydoc operator>=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept - { - return lhs >= basic_json(rhs); - } - - /*! - @brief comparison: greater than or equal - @copydoc operator>=(const_reference, const_reference) - */ - template::value, int>::type = 0> - friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept - { - return basic_json(lhs) >= rhs; - } - - /// @} - - /////////////////// - // serialization // - /////////////////// - - /// @name serialization - /// @{ - - /*! - @brief serialize to stream - - Serialize the given JSON value @a j to the output stream @a o. The JSON - value will be serialized using the @ref dump member function. - - - The indentation of the output can be controlled with the member variable - `width` of the output stream @a o. 
For instance, using the manipulator - `std::setw(4)` on @a o sets the indentation level to `4` and the - serialization result is the same as calling `dump(4)`. - - - The indentation character can be controlled with the member variable - `fill` of the output stream @a o. For instance, the manipulator - `std::setfill('\\t')` sets indentation to use a tab character rather than - the default space character. - - @param[in,out] o stream to serialize to - @param[in] j JSON value to serialize - - @return the stream @a o - - @throw type_error.316 if a string stored inside the JSON value is not - UTF-8 encoded - - @complexity Linear. - - @liveexample{The example below shows the serialization with different - parameters to `width` to adjust the indentation level.,operator_serialize} - - @since version 1.0.0; indentation character added in version 3.0.0 - */ - friend std::ostream& operator<<(std::ostream& o, const basic_json& j) - { - // read width member and use it as indentation parameter if nonzero - const bool pretty_print = o.width() > 0; - const auto indentation = pretty_print ? o.width() : 0; - - // reset width to 0 for subsequent calls to this stream - o.width(0); - - // do the actual serialization - serializer s(detail::output_adapter(o), o.fill()); - s.dump(j, pretty_print, false, static_cast(indentation)); - return o; - } - - /*! - @brief serialize to stream - @deprecated This stream operator is deprecated and will be removed in - future 4.0.0 of the library. Please use - @ref operator<<(std::ostream&, const basic_json&) - instead; that is, replace calls like `j >> o;` with `o << j;`. - @since version 1.0.0; deprecated since version 3.0.0 - */ - JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator<<(std::ostream&, const basic_json&)) - friend std::ostream& operator>>(const basic_json& j, std::ostream& o) - { - return o << j; - } - - /// @} - - - ///////////////////// - // deserialization // - ///////////////////// - - /// @name deserialization - /// @{ - - /*! 
- @brief deserialize from a compatible input - - @tparam InputType A compatible input, for instance - - an std::istream object - - a FILE pointer - - a C-style array of characters - - a pointer to a null-terminated string of single byte characters - - an object obj for which begin(obj) and end(obj) produces a valid pair of - iterators. - - @param[in] i input to read from - @param[in] cb a parser callback function of type @ref parser_callback_t - which is used to control the deserialization by filtering unwanted values - (optional) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - @param[in] ignore_comments whether comments should be ignored and treated - like whitespace (true) or yield a parse error (true); (optional, false by - default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. - - @throw parse_error.101 if a parse error occurs; example: `""unexpected end - of input; expected string literal""` - @throw parse_error.102 if to_unicode fails or surrogate error - @throw parse_error.103 if to_unicode fails - - @complexity Linear in the length of the input. The parser is a predictive - LL(1) parser. The complexity can be higher if the parser callback function - @a cb or reading from the input @a i has a super-linear complexity. - - @note A UTF-8 byte order mark is silently ignored. 
- - @liveexample{The example below demonstrates the `parse()` function reading - from an array.,parse__array__parser_callback_t} - - @liveexample{The example below demonstrates the `parse()` function with - and without callback function.,parse__string__parser_callback_t} - - @liveexample{The example below demonstrates the `parse()` function with - and without callback function.,parse__istream__parser_callback_t} - - @liveexample{The example below demonstrates the `parse()` function reading - from a contiguous container.,parse__contiguouscontainer__parser_callback_t} - - @since version 2.0.3 (contiguous containers); version 3.9.0 allowed to - ignore comments. - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json parse(InputType&& i, - const parser_callback_t cb = nullptr, - const bool allow_exceptions = true, - const bool ignore_comments = false) - { - basic_json result; - parser(detail::input_adapter(std::forward(i)), cb, allow_exceptions, ignore_comments).parse(true, result); - return result; - } - - /*! - @brief deserialize from a pair of character iterators - - The value_type of the iterator must be a integral type with size of 1, 2 or - 4 bytes, which will be interpreted respectively as UTF-8, UTF-16 and UTF-32. - - @param[in] first iterator to start of character range - @param[in] last iterator to end of character range - @param[in] cb a parser callback function of type @ref parser_callback_t - which is used to control the deserialization by filtering unwanted values - (optional) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - @param[in] ignore_comments whether comments should be ignored and treated - like whitespace (true) or yield a parse error (true); (optional, false by - default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. 
- - @throw parse_error.101 if a parse error occurs; example: `""unexpected end - of input; expected string literal""` - @throw parse_error.102 if to_unicode fails or surrogate error - @throw parse_error.103 if to_unicode fails - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json parse(IteratorType first, - IteratorType last, - const parser_callback_t cb = nullptr, - const bool allow_exceptions = true, - const bool ignore_comments = false) - { - basic_json result; - parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result); - return result; - } - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len)) - static basic_json parse(detail::span_input_adapter&& i, - const parser_callback_t cb = nullptr, - const bool allow_exceptions = true, - const bool ignore_comments = false) - { - basic_json result; - parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result); - return result; - } - - /*! - @brief check if the input is valid JSON - - Unlike the @ref parse(InputType&&, const parser_callback_t,const bool) - function, this function neither throws an exception in case of invalid JSON - input (i.e., a parse error) nor creates diagnostic information. - - @tparam InputType A compatible input, for instance - - an std::istream object - - a FILE pointer - - a C-style array of characters - - a pointer to a null-terminated string of single byte characters - - an object obj for which begin(obj) and end(obj) produces a valid pair of - iterators. - - @param[in] i input to read from - @param[in] ignore_comments whether comments should be ignored and treated - like whitespace (true) or yield a parse error (true); (optional, false by - default) - - @return Whether the input read from @a i is valid JSON. - - @complexity Linear in the length of the input. The parser is a predictive - LL(1) parser. - - @note A UTF-8 byte order mark is silently ignored. 
- - @liveexample{The example below demonstrates the `accept()` function reading - from a string.,accept__string} - */ - template - static bool accept(InputType&& i, - const bool ignore_comments = false) - { - return parser(detail::input_adapter(std::forward(i)), nullptr, false, ignore_comments).accept(true); - } - - template - static bool accept(IteratorType first, IteratorType last, - const bool ignore_comments = false) - { - return parser(detail::input_adapter(std::move(first), std::move(last)), nullptr, false, ignore_comments).accept(true); - } - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, accept(ptr, ptr + len)) - static bool accept(detail::span_input_adapter&& i, - const bool ignore_comments = false) - { - return parser(i.get(), nullptr, false, ignore_comments).accept(true); - } - - /*! - @brief generate SAX events - - The SAX event lister must follow the interface of @ref json_sax. - - This function reads from a compatible input. Examples are: - - an std::istream object - - a FILE pointer - - a C-style array of characters - - a pointer to a null-terminated string of single byte characters - - an object obj for which begin(obj) and end(obj) produces a valid pair of - iterators. - - @param[in] i input to read from - @param[in,out] sax SAX event listener - @param[in] format the format to parse (JSON, CBOR, MessagePack, or UBJSON) - @param[in] strict whether the input has to be consumed completely - @param[in] ignore_comments whether comments should be ignored and treated - like whitespace (true) or yield a parse error (true); (optional, false by - default); only applies to the JSON file format. - - @return return value of the last processed SAX event - - @throw parse_error.101 if a parse error occurs; example: `""unexpected end - of input; expected string literal""` - @throw parse_error.102 if to_unicode fails or surrogate error - @throw parse_error.103 if to_unicode fails - - @complexity Linear in the length of the input. 
The parser is a predictive - LL(1) parser. The complexity can be higher if the SAX consumer @a sax has - a super-linear complexity. - - @note A UTF-8 byte order mark is silently ignored. - - @liveexample{The example below demonstrates the `sax_parse()` function - reading from string and processing the events with a user-defined SAX - event consumer.,sax_parse} - - @since version 3.2.0 - */ - template - JSON_HEDLEY_NON_NULL(2) - static bool sax_parse(InputType&& i, SAX* sax, - input_format_t format = input_format_t::json, - const bool strict = true, - const bool ignore_comments = false) - { - auto ia = detail::input_adapter(std::forward(i)); - return format == input_format_t::json - ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) - : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); - } - - template - JSON_HEDLEY_NON_NULL(3) - static bool sax_parse(IteratorType first, IteratorType last, SAX* sax, - input_format_t format = input_format_t::json, - const bool strict = true, - const bool ignore_comments = false) - { - auto ia = detail::input_adapter(std::move(first), std::move(last)); - return format == input_format_t::json - ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) - : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); - } - - template - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, sax_parse(ptr, ptr + len, ...)) - JSON_HEDLEY_NON_NULL(2) - static bool sax_parse(detail::span_input_adapter&& i, SAX* sax, - input_format_t format = input_format_t::json, - const bool strict = true, - const bool ignore_comments = false) - { - auto ia = i.get(); - return format == input_format_t::json - ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict) - : detail::binary_reader(std::move(ia)).sax_parse(format, sax, strict); - } - - /*! - @brief deserialize from stream - @deprecated This stream operator is deprecated and will be removed in - version 4.0.0 of the library. 
Please use - @ref operator>>(std::istream&, basic_json&) - instead; that is, replace calls like `j << i;` with `i >> j;`. - @since version 1.0.0; deprecated since version 3.0.0 - */ - JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator>>(std::istream&, basic_json&)) - friend std::istream& operator<<(basic_json& j, std::istream& i) - { - return operator>>(i, j); - } - - /*! - @brief deserialize from stream - - Deserializes an input stream to a JSON value. - - @param[in,out] i input stream to read a serialized JSON value from - @param[in,out] j JSON value to write the deserialized input to - - @throw parse_error.101 in case of an unexpected token - @throw parse_error.102 if to_unicode fails or surrogate error - @throw parse_error.103 if to_unicode fails - - @complexity Linear in the length of the input. The parser is a predictive - LL(1) parser. - - @note A UTF-8 byte order mark is silently ignored. - - @liveexample{The example below shows how a JSON value is constructed by - reading a serialization from a stream.,operator_deserialize} - - @sa parse(std::istream&, const parser_callback_t) for a variant with a - parser callback function to filter values while parsing - - @since version 1.0.0 - */ - friend std::istream& operator>>(std::istream& i, basic_json& j) - { - parser(detail::input_adapter(i)).parse(false, j); - return i; - } - - /// @} - - /////////////////////////// - // convenience functions // - /////////////////////////// - - /*! - @brief return the type as string - - Returns the type name as string to be used in error messages - usually to - indicate that a function was called on a wrong JSON type. 
- - @return a string representation of a the @a m_type member: - Value type | return value - ----------- | ------------- - null | `"null"` - boolean | `"boolean"` - string | `"string"` - number | `"number"` (for all number types) - object | `"object"` - array | `"array"` - binary | `"binary"` - discarded | `"discarded"` - - @exceptionsafety No-throw guarantee: this function never throws exceptions. - - @complexity Constant. - - @liveexample{The following code exemplifies `type_name()` for all JSON - types.,type_name} - - @sa @ref type() -- return the type of the JSON value - @sa @ref operator value_t() -- return the type of the JSON value (implicit) - - @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept` - since 3.0.0 - */ - JSON_HEDLEY_RETURNS_NON_NULL - const char* type_name() const noexcept - { - { - switch (m_type) - { - case value_t::null: - return "null"; - case value_t::object: - return "object"; - case value_t::array: - return "array"; - case value_t::string: - return "string"; - case value_t::boolean: - return "boolean"; - case value_t::binary: - return "binary"; - case value_t::discarded: - return "discarded"; - default: - return "number"; - } - } - } - - - JSON_PRIVATE_UNLESS_TESTED: - ////////////////////// - // member variables // - ////////////////////// - - /// the type of the current element - value_t m_type = value_t::null; - - /// the value of the current element - json_value m_value = {}; - -#if JSON_DIAGNOSTICS - /// a pointer to a parent value (for debugging purposes) - basic_json* m_parent = nullptr; -#endif - - ////////////////////////////////////////// - // binary serialization/deserialization // - ////////////////////////////////////////// - - /// @name binary serialization/deserialization support - /// @{ - - public: - /*! - @brief create a CBOR serialization of a given JSON value - - Serializes a given JSON value @a j to a byte vector using the CBOR (Concise - Binary Object Representation) serialization format. 
CBOR is a binary - serialization format which aims to be more compact than JSON itself, yet - more efficient to parse. - - The library uses the following mapping from JSON values types to - CBOR types according to the CBOR specification (RFC 7049): - - JSON value type | value/range | CBOR type | first byte - --------------- | ------------------------------------------ | ---------------------------------- | --------------- - null | `null` | Null | 0xF6 - boolean | `true` | True | 0xF5 - boolean | `false` | False | 0xF4 - number_integer | -9223372036854775808..-2147483649 | Negative integer (8 bytes follow) | 0x3B - number_integer | -2147483648..-32769 | Negative integer (4 bytes follow) | 0x3A - number_integer | -32768..-129 | Negative integer (2 bytes follow) | 0x39 - number_integer | -128..-25 | Negative integer (1 byte follow) | 0x38 - number_integer | -24..-1 | Negative integer | 0x20..0x37 - number_integer | 0..23 | Integer | 0x00..0x17 - number_integer | 24..255 | Unsigned integer (1 byte follow) | 0x18 - number_integer | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 - number_integer | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A - number_integer | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B - number_unsigned | 0..23 | Integer | 0x00..0x17 - number_unsigned | 24..255 | Unsigned integer (1 byte follow) | 0x18 - number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19 - number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A - number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B - number_float | *any value representable by a float* | Single-Precision Float | 0xFA - number_float | *any value NOT representable by a float* | Double-Precision Float | 0xFB - string | *length*: 0..23 | UTF-8 string | 0x60..0x77 - string | *length*: 23..255 | UTF-8 string (1 byte follow) | 0x78 - string | *length*: 256..65535 | UTF-8 string (2 bytes 
follow) | 0x79 - string | *length*: 65536..4294967295 | UTF-8 string (4 bytes follow) | 0x7A - string | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow) | 0x7B - array | *size*: 0..23 | array | 0x80..0x97 - array | *size*: 23..255 | array (1 byte follow) | 0x98 - array | *size*: 256..65535 | array (2 bytes follow) | 0x99 - array | *size*: 65536..4294967295 | array (4 bytes follow) | 0x9A - array | *size*: 4294967296..18446744073709551615 | array (8 bytes follow) | 0x9B - object | *size*: 0..23 | map | 0xA0..0xB7 - object | *size*: 23..255 | map (1 byte follow) | 0xB8 - object | *size*: 256..65535 | map (2 bytes follow) | 0xB9 - object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA - object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB - binary | *size*: 0..23 | byte string | 0x40..0x57 - binary | *size*: 23..255 | byte string (1 byte follow) | 0x58 - binary | *size*: 256..65535 | byte string (2 bytes follow) | 0x59 - binary | *size*: 65536..4294967295 | byte string (4 bytes follow) | 0x5A - binary | *size*: 4294967296..18446744073709551615 | byte string (8 bytes follow) | 0x5B - - @note The mapping is **complete** in the sense that any JSON value type - can be converted to a CBOR value. - - @note If NaN or Infinity are stored inside a JSON number, they are - serialized properly. This behavior differs from the @ref dump() - function which serializes NaN or Infinity to `null`. 
- - @note The following CBOR types are not used in the conversion: - - UTF-8 strings terminated by "break" (0x7F) - - arrays terminated by "break" (0x9F) - - maps terminated by "break" (0xBF) - - byte strings terminated by "break" (0x5F) - - date/time (0xC0..0xC1) - - bignum (0xC2..0xC3) - - decimal fraction (0xC4) - - bigfloat (0xC5) - - expected conversions (0xD5..0xD7) - - simple values (0xE0..0xF3, 0xF8) - - undefined (0xF7) - - half-precision floats (0xF9) - - break (0xFF) - - @param[in] j JSON value to serialize - @return CBOR serialization as byte vector - - @complexity Linear in the size of the JSON value @a j. - - @liveexample{The example shows the serialization of a JSON value to a byte - vector in CBOR format.,to_cbor} - - @sa http://cbor.io - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the - analogous deserialization - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the - related UBJSON format - - @since version 2.0.9; compact representation of floating-point numbers - since version 3.8.0 - */ - static std::vector to_cbor(const basic_json& j) - { - std::vector result; - to_cbor(j, result); - return result; - } - - static void to_cbor(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_cbor(j); - } - - static void to_cbor(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_cbor(j); - } - - /*! - @brief create a MessagePack serialization of a given JSON value - - Serializes a given JSON value @a j to a byte vector using the MessagePack - serialization format. MessagePack is a binary serialization format which - aims to be more compact than JSON itself, yet more efficient to parse. 
- - The library uses the following mapping from JSON values types to - MessagePack types according to the MessagePack specification: - - JSON value type | value/range | MessagePack type | first byte - --------------- | --------------------------------- | ---------------- | ---------- - null | `null` | nil | 0xC0 - boolean | `true` | true | 0xC3 - boolean | `false` | false | 0xC2 - number_integer | -9223372036854775808..-2147483649 | int64 | 0xD3 - number_integer | -2147483648..-32769 | int32 | 0xD2 - number_integer | -32768..-129 | int16 | 0xD1 - number_integer | -128..-33 | int8 | 0xD0 - number_integer | -32..-1 | negative fixint | 0xE0..0xFF - number_integer | 0..127 | positive fixint | 0x00..0x7F - number_integer | 128..255 | uint 8 | 0xCC - number_integer | 256..65535 | uint 16 | 0xCD - number_integer | 65536..4294967295 | uint 32 | 0xCE - number_integer | 4294967296..18446744073709551615 | uint 64 | 0xCF - number_unsigned | 0..127 | positive fixint | 0x00..0x7F - number_unsigned | 128..255 | uint 8 | 0xCC - number_unsigned | 256..65535 | uint 16 | 0xCD - number_unsigned | 65536..4294967295 | uint 32 | 0xCE - number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF - number_float | *any value representable by a float* | float 32 | 0xCA - number_float | *any value NOT representable by a float* | float 64 | 0xCB - string | *length*: 0..31 | fixstr | 0xA0..0xBF - string | *length*: 32..255 | str 8 | 0xD9 - string | *length*: 256..65535 | str 16 | 0xDA - string | *length*: 65536..4294967295 | str 32 | 0xDB - array | *size*: 0..15 | fixarray | 0x90..0x9F - array | *size*: 16..65535 | array 16 | 0xDC - array | *size*: 65536..4294967295 | array 32 | 0xDD - object | *size*: 0..15 | fix map | 0x80..0x8F - object | *size*: 16..65535 | map 16 | 0xDE - object | *size*: 65536..4294967295 | map 32 | 0xDF - binary | *size*: 0..255 | bin 8 | 0xC4 - binary | *size*: 256..65535 | bin 16 | 0xC5 - binary | *size*: 65536..4294967295 | bin 32 | 0xC6 - - @note The mapping 
is **complete** in the sense that any JSON value type - can be converted to a MessagePack value. - - @note The following values can **not** be converted to a MessagePack value: - - strings with more than 4294967295 bytes - - byte strings with more than 4294967295 bytes - - arrays with more than 4294967295 elements - - objects with more than 4294967295 elements - - @note Any MessagePack output created @ref to_msgpack can be successfully - parsed by @ref from_msgpack. - - @note If NaN or Infinity are stored inside a JSON number, they are - serialized properly. This behavior differs from the @ref dump() - function which serializes NaN or Infinity to `null`. - - @param[in] j JSON value to serialize - @return MessagePack serialization as byte vector - - @complexity Linear in the size of the JSON value @a j. - - @liveexample{The example shows the serialization of a JSON value to a byte - vector in MessagePack format.,to_msgpack} - - @sa http://msgpack.org - @sa @ref from_msgpack for the analogous deserialization - @sa @ref to_cbor(const basic_json& for the related CBOR format - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the - related UBJSON format - - @since version 2.0.9 - */ - static std::vector to_msgpack(const basic_json& j) - { - std::vector result; - to_msgpack(j, result); - return result; - } - - static void to_msgpack(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_msgpack(j); - } - - static void to_msgpack(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_msgpack(j); - } - - /*! - @brief create a UBJSON serialization of a given JSON value - - Serializes a given JSON value @a j to a byte vector using the UBJSON - (Universal Binary JSON) serialization format. UBJSON aims to be more compact - than JSON itself, yet more efficient to parse. 
- - The library uses the following mapping from JSON values types to - UBJSON types according to the UBJSON specification: - - JSON value type | value/range | UBJSON type | marker - --------------- | --------------------------------- | ----------- | ------ - null | `null` | null | `Z` - boolean | `true` | true | `T` - boolean | `false` | false | `F` - number_integer | -9223372036854775808..-2147483649 | int64 | `L` - number_integer | -2147483648..-32769 | int32 | `l` - number_integer | -32768..-129 | int16 | `I` - number_integer | -128..127 | int8 | `i` - number_integer | 128..255 | uint8 | `U` - number_integer | 256..32767 | int16 | `I` - number_integer | 32768..2147483647 | int32 | `l` - number_integer | 2147483648..9223372036854775807 | int64 | `L` - number_unsigned | 0..127 | int8 | `i` - number_unsigned | 128..255 | uint8 | `U` - number_unsigned | 256..32767 | int16 | `I` - number_unsigned | 32768..2147483647 | int32 | `l` - number_unsigned | 2147483648..9223372036854775807 | int64 | `L` - number_unsigned | 2147483649..18446744073709551615 | high-precision | `H` - number_float | *any value* | float64 | `D` - string | *with shortest length indicator* | string | `S` - array | *see notes on optimized format* | array | `[` - object | *see notes on optimized format* | map | `{` - - @note The mapping is **complete** in the sense that any JSON value type - can be converted to a UBJSON value. - - @note The following values can **not** be converted to a UBJSON value: - - strings with more than 9223372036854775807 bytes (theoretical) - - @note The following markers are not used in the conversion: - - `Z`: no-op values are not created. - - `C`: single-byte strings are serialized with `S` markers. - - @note Any UBJSON output created @ref to_ubjson can be successfully parsed - by @ref from_ubjson. - - @note If NaN or Infinity are stored inside a JSON number, they are - serialized properly. 
This behavior differs from the @ref dump() - function which serializes NaN or Infinity to `null`. - - @note The optimized formats for containers are supported: Parameter - @a use_size adds size information to the beginning of a container and - removes the closing marker. Parameter @a use_type further checks - whether all elements of a container have the same type and adds the - type marker to the beginning of the container. The @a use_type - parameter must only be used together with @a use_size = true. Note - that @a use_size = true alone may result in larger representations - - the benefit of this parameter is that the receiving side is - immediately informed on the number of elements of the container. - - @note If the JSON data contains the binary type, the value stored is a list - of integers, as suggested by the UBJSON documentation. In particular, - this means that serialization and the deserialization of a JSON - containing binary values into UBJSON and back will result in a - different JSON object. - - @param[in] j JSON value to serialize - @param[in] use_size whether to add size annotations to container types - @param[in] use_type whether to add type annotations to container types - (must be combined with @a use_size = true) - @return UBJSON serialization as byte vector - - @complexity Linear in the size of the JSON value @a j. 
- - @liveexample{The example shows the serialization of a JSON value to a byte - vector in UBJSON format.,to_ubjson} - - @sa http://ubjson.org - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the - analogous deserialization - @sa @ref to_cbor(const basic_json& for the related CBOR format - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format - - @since version 3.1.0 - */ - static std::vector to_ubjson(const basic_json& j, - const bool use_size = false, - const bool use_type = false) - { - std::vector result; - to_ubjson(j, result, use_size, use_type); - return result; - } - - static void to_ubjson(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) - { - binary_writer(o).write_ubjson(j, use_size, use_type); - } - - static void to_ubjson(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) - { - binary_writer(o).write_ubjson(j, use_size, use_type); - } - - - /*! - @brief Serializes the given JSON object `j` to BSON and returns a vector - containing the corresponding BSON-representation. - - BSON (Binary JSON) is a binary format in which zero or more ordered key/value pairs are - stored as a single entity (a so-called document). 
- - The library uses the following mapping from JSON values types to BSON types: - - JSON value type | value/range | BSON type | marker - --------------- | --------------------------------- | ----------- | ------ - null | `null` | null | 0x0A - boolean | `true`, `false` | boolean | 0x08 - number_integer | -9223372036854775808..-2147483649 | int64 | 0x12 - number_integer | -2147483648..2147483647 | int32 | 0x10 - number_integer | 2147483648..9223372036854775807 | int64 | 0x12 - number_unsigned | 0..2147483647 | int32 | 0x10 - number_unsigned | 2147483648..9223372036854775807 | int64 | 0x12 - number_unsigned | 9223372036854775808..18446744073709551615| -- | -- - number_float | *any value* | double | 0x01 - string | *any value* | string | 0x02 - array | *any value* | document | 0x04 - object | *any value* | document | 0x03 - binary | *any value* | binary | 0x05 - - @warning The mapping is **incomplete**, since only JSON-objects (and things - contained therein) can be serialized to BSON. - Also, integers larger than 9223372036854775807 cannot be serialized to BSON, - and the keys may not contain U+0000, since they are serialized a - zero-terminated c-strings. - - @throw out_of_range.407 if `j.is_number_unsigned() && j.get() > 9223372036854775807` - @throw out_of_range.409 if a key in `j` contains a NULL (U+0000) - @throw type_error.317 if `!j.is_object()` - - @pre The input `j` is required to be an object: `j.is_object() == true`. - - @note Any BSON output created via @ref to_bson can be successfully parsed - by @ref from_bson. - - @param[in] j JSON value to serialize - @return BSON serialization as byte vector - - @complexity Linear in the size of the JSON value @a j. 
- - @liveexample{The example shows the serialization of a JSON value to a byte - vector in BSON format.,to_bson} - - @sa http://bsonspec.org/spec.html - @sa @ref from_bson(detail::input_adapter&&, const bool strict) for the - analogous deserialization - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the - related UBJSON format - @sa @ref to_cbor(const basic_json&) for the related CBOR format - @sa @ref to_msgpack(const basic_json&) for the related MessagePack format - */ - static std::vector to_bson(const basic_json& j) - { - std::vector result; - to_bson(j, result); - return result; - } - - /*! - @brief Serializes the given JSON object `j` to BSON and forwards the - corresponding BSON-representation to the given output_adapter `o`. - @param j The JSON object to convert to BSON. - @param o The output adapter that receives the binary BSON representation. - @pre The input `j` shall be an object: `j.is_object() == true` - @sa @ref to_bson(const basic_json&) - */ - static void to_bson(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_bson(j); - } - - /*! - @copydoc to_bson(const basic_json&, detail::output_adapter) - */ - static void to_bson(const basic_json& j, detail::output_adapter o) - { - binary_writer(o).write_bson(j); - } - - - /*! - @brief create a JSON value from an input in CBOR format - - Deserializes a given input @a i to a JSON value using the CBOR (Concise - Binary Object Representation) serialization format. 
- - The library maps CBOR types to JSON value types as follows: - - CBOR type | JSON value type | first byte - ---------------------- | --------------- | ---------- - Integer | number_unsigned | 0x00..0x17 - Unsigned integer | number_unsigned | 0x18 - Unsigned integer | number_unsigned | 0x19 - Unsigned integer | number_unsigned | 0x1A - Unsigned integer | number_unsigned | 0x1B - Negative integer | number_integer | 0x20..0x37 - Negative integer | number_integer | 0x38 - Negative integer | number_integer | 0x39 - Negative integer | number_integer | 0x3A - Negative integer | number_integer | 0x3B - Byte string | binary | 0x40..0x57 - Byte string | binary | 0x58 - Byte string | binary | 0x59 - Byte string | binary | 0x5A - Byte string | binary | 0x5B - UTF-8 string | string | 0x60..0x77 - UTF-8 string | string | 0x78 - UTF-8 string | string | 0x79 - UTF-8 string | string | 0x7A - UTF-8 string | string | 0x7B - UTF-8 string | string | 0x7F - array | array | 0x80..0x97 - array | array | 0x98 - array | array | 0x99 - array | array | 0x9A - array | array | 0x9B - array | array | 0x9F - map | object | 0xA0..0xB7 - map | object | 0xB8 - map | object | 0xB9 - map | object | 0xBA - map | object | 0xBB - map | object | 0xBF - False | `false` | 0xF4 - True | `true` | 0xF5 - Null | `null` | 0xF6 - Half-Precision Float | number_float | 0xF9 - Single-Precision Float | number_float | 0xFA - Double-Precision Float | number_float | 0xFB - - @warning The mapping is **incomplete** in the sense that not all CBOR - types can be converted to a JSON value. The following CBOR types - are not supported and will yield parse errors (parse_error.112): - - date/time (0xC0..0xC1) - - bignum (0xC2..0xC3) - - decimal fraction (0xC4) - - bigfloat (0xC5) - - expected conversions (0xD5..0xD7) - - simple values (0xE0..0xF3, 0xF8) - - undefined (0xF7) - - @warning CBOR allows map keys of any type, whereas JSON only allows - strings as keys in object values. 
Therefore, CBOR maps with keys - other than UTF-8 strings are rejected (parse_error.113). - - @note Any CBOR output created @ref to_cbor can be successfully parsed by - @ref from_cbor. - - @param[in] i an input in CBOR format convertible to an input adapter - @param[in] strict whether to expect the input to be consumed until EOF - (true by default) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - @param[in] tag_handler how to treat CBOR tags (optional, error by default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. - - @throw parse_error.110 if the given input ends prematurely or the end of - file was not reached when @a strict was set to true - @throw parse_error.112 if unsupported features from CBOR were - used in the given input @a v or if the input is not valid CBOR - @throw parse_error.113 if a string was expected as map key, but not found - - @complexity Linear in the size of the input @a i. - - @liveexample{The example shows the deserialization of a byte vector in CBOR - format to a JSON value.,from_cbor} - - @sa http://cbor.io - @sa @ref to_cbor(const basic_json&) for the analogous serialization - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the - related MessagePack format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the - related UBJSON format - - @since version 2.0.9; parameter @a start_index since 2.1.1; changed to - consume input adapters, removed start_index parameter, and added - @a strict parameter since 3.0.0; added @a allow_exceptions parameter - since 3.2.0; added @a tag_handler parameter since 3.9.0. 
- */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_cbor(InputType&& i, - const bool strict = true, - const bool allow_exceptions = true, - const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); - return res ? result : basic_json(value_t::discarded); - } - - /*! - @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_cbor(IteratorType first, IteratorType last, - const bool strict = true, - const bool allow_exceptions = true, - const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); - return res ? 
result : basic_json(value_t::discarded); - } - - template - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len)) - static basic_json from_cbor(const T* ptr, std::size_t len, - const bool strict = true, - const bool allow_exceptions = true, - const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) - { - return from_cbor(ptr, ptr + len, strict, allow_exceptions, tag_handler); - } - - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len)) - static basic_json from_cbor(detail::span_input_adapter&& i, - const bool strict = true, - const bool allow_exceptions = true, - const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = i.get(); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); - return res ? result : basic_json(value_t::discarded); - } - - /*! - @brief create a JSON value from an input in MessagePack format - - Deserializes a given input @a i to a JSON value using the MessagePack - serialization format. 
- - The library maps MessagePack types to JSON value types as follows: - - MessagePack type | JSON value type | first byte - ---------------- | --------------- | ---------- - positive fixint | number_unsigned | 0x00..0x7F - fixmap | object | 0x80..0x8F - fixarray | array | 0x90..0x9F - fixstr | string | 0xA0..0xBF - nil | `null` | 0xC0 - false | `false` | 0xC2 - true | `true` | 0xC3 - float 32 | number_float | 0xCA - float 64 | number_float | 0xCB - uint 8 | number_unsigned | 0xCC - uint 16 | number_unsigned | 0xCD - uint 32 | number_unsigned | 0xCE - uint 64 | number_unsigned | 0xCF - int 8 | number_integer | 0xD0 - int 16 | number_integer | 0xD1 - int 32 | number_integer | 0xD2 - int 64 | number_integer | 0xD3 - str 8 | string | 0xD9 - str 16 | string | 0xDA - str 32 | string | 0xDB - array 16 | array | 0xDC - array 32 | array | 0xDD - map 16 | object | 0xDE - map 32 | object | 0xDF - bin 8 | binary | 0xC4 - bin 16 | binary | 0xC5 - bin 32 | binary | 0xC6 - ext 8 | binary | 0xC7 - ext 16 | binary | 0xC8 - ext 32 | binary | 0xC9 - fixext 1 | binary | 0xD4 - fixext 2 | binary | 0xD5 - fixext 4 | binary | 0xD6 - fixext 8 | binary | 0xD7 - fixext 16 | binary | 0xD8 - negative fixint | number_integer | 0xE0-0xFF - - @note Any MessagePack output created @ref to_msgpack can be successfully - parsed by @ref from_msgpack. - - @param[in] i an input in MessagePack format convertible to an input - adapter - @param[in] strict whether to expect the input to be consumed until EOF - (true by default) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. 
- - @throw parse_error.110 if the given input ends prematurely or the end of - file was not reached when @a strict was set to true - @throw parse_error.112 if unsupported features from MessagePack were - used in the given input @a i or if the input is not valid MessagePack - @throw parse_error.113 if a string was expected as map key, but not found - - @complexity Linear in the size of the input @a i. - - @liveexample{The example shows the deserialization of a byte vector in - MessagePack format to a JSON value.,from_msgpack} - - @sa http://msgpack.org - @sa @ref to_msgpack(const basic_json&) for the analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the - related CBOR format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for - the related UBJSON format - @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for - the related BSON format - - @since version 2.0.9; parameter @a start_index since 2.1.1; changed to - consume input adapters, removed start_index parameter, and added - @a strict parameter since 3.0.0; added @a allow_exceptions parameter - since 3.2.0 - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_msgpack(InputType&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - /*! 
- @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool) - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_msgpack(IteratorType first, IteratorType last, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - - template - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len)) - static basic_json from_msgpack(const T* ptr, std::size_t len, - const bool strict = true, - const bool allow_exceptions = true) - { - return from_msgpack(ptr, ptr + len, strict, allow_exceptions); - } - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len)) - static basic_json from_msgpack(detail::span_input_adapter&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = i.get(); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - - /*! - @brief create a JSON value from an input in UBJSON format - - Deserializes a given input @a i to a JSON value using the UBJSON (Universal - Binary JSON) serialization format. 
- - The library maps UBJSON types to JSON value types as follows: - - UBJSON type | JSON value type | marker - ----------- | --------------------------------------- | ------ - no-op | *no value, next value is read* | `N` - null | `null` | `Z` - false | `false` | `F` - true | `true` | `T` - float32 | number_float | `d` - float64 | number_float | `D` - uint8 | number_unsigned | `U` - int8 | number_integer | `i` - int16 | number_integer | `I` - int32 | number_integer | `l` - int64 | number_integer | `L` - high-precision number | number_integer, number_unsigned, or number_float - depends on number string | 'H' - string | string | `S` - char | string | `C` - array | array (optimized values are supported) | `[` - object | object (optimized values are supported) | `{` - - @note The mapping is **complete** in the sense that any UBJSON value can - be converted to a JSON value. - - @param[in] i an input in UBJSON format convertible to an input adapter - @param[in] strict whether to expect the input to be consumed until EOF - (true by default) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. - - @throw parse_error.110 if the given input ends prematurely or the end of - file was not reached when @a strict was set to true - @throw parse_error.112 if a parse error occurs - @throw parse_error.113 if a string could not be parsed successfully - - @complexity Linear in the size of the input @a i. 
- - @liveexample{The example shows the deserialization of a byte vector in - UBJSON format to a JSON value.,from_ubjson} - - @sa http://ubjson.org - @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the - analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the - related CBOR format - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for - the related MessagePack format - @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for - the related BSON format - - @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0 - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_ubjson(InputType&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - /*! - @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool) - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_ubjson(IteratorType first, IteratorType last, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); - return res ? 
result : basic_json(value_t::discarded); - } - - template - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len)) - static basic_json from_ubjson(const T* ptr, std::size_t len, - const bool strict = true, - const bool allow_exceptions = true) - { - return from_ubjson(ptr, ptr + len, strict, allow_exceptions); - } - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len)) - static basic_json from_ubjson(detail::span_input_adapter&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = i.get(); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - - /*! - @brief Create a JSON value from an input in BSON format - - Deserializes a given input @a i to a JSON value using the BSON (Binary JSON) - serialization format. - - The library maps BSON record types to JSON value types as follows: - - BSON type | BSON marker byte | JSON value type - --------------- | ---------------- | --------------------------- - double | 0x01 | number_float - string | 0x02 | string - document | 0x03 | object - array | 0x04 | array - binary | 0x05 | binary - undefined | 0x06 | still unsupported - ObjectId | 0x07 | still unsupported - boolean | 0x08 | boolean - UTC Date-Time | 0x09 | still unsupported - null | 0x0A | null - Regular Expr. | 0x0B | still unsupported - DB Pointer | 0x0C | still unsupported - JavaScript Code | 0x0D | still unsupported - Symbol | 0x0E | still unsupported - JavaScript Code | 0x0F | still unsupported - int32 | 0x10 | number_integer - Timestamp | 0x11 | still unsupported - 128-bit decimal float | 0x13 | still unsupported - Max Key | 0x7F | still unsupported - Min Key | 0xFF | still unsupported - - @warning The mapping is **incomplete**. 
The unsupported mappings - are indicated in the table above. - - @param[in] i an input in BSON format convertible to an input adapter - @param[in] strict whether to expect the input to be consumed until EOF - (true by default) - @param[in] allow_exceptions whether to throw exceptions in case of a - parse error (optional, true by default) - - @return deserialized JSON value; in case of a parse error and - @a allow_exceptions set to `false`, the return value will be - value_t::discarded. - - @throw parse_error.114 if an unsupported BSON record type is encountered - - @complexity Linear in the size of the input @a i. - - @liveexample{The example shows the deserialization of a byte vector in - BSON format to a JSON value.,from_bson} - - @sa http://bsonspec.org/spec.html - @sa @ref to_bson(const basic_json&) for the analogous serialization - @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the - related CBOR format - @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for - the related MessagePack format - @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the - related UBJSON format - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_bson(InputType&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - /*! 
- @copydoc from_bson(detail::input_adapter&&, const bool, const bool) - */ - template - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json from_bson(IteratorType first, IteratorType last, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - - template - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len)) - static basic_json from_bson(const T* ptr, std::size_t len, - const bool strict = true, - const bool allow_exceptions = true) - { - return from_bson(ptr, ptr + len, strict, allow_exceptions); - } - - JSON_HEDLEY_WARN_UNUSED_RESULT - JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len)) - static basic_json from_bson(detail::span_input_adapter&& i, - const bool strict = true, - const bool allow_exceptions = true) - { - basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); - auto ia = i.get(); - const bool res = binary_reader(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict); - return res ? result : basic_json(value_t::discarded); - } - /// @} - - ////////////////////////// - // JSON Pointer support // - ////////////////////////// - - /// @name JSON Pointer functions - /// @{ - - /*! - @brief access specified element via JSON Pointer - - Uses a JSON pointer to retrieve a reference to the respective JSON value. - No bound checking is performed. Similar to @ref operator[](const typename - object_t::key_type&), `null` values are created in arrays and objects if - necessary. - - In particular: - - If the JSON pointer points to an object key that does not exist, it - is created an filled with a `null` value before a reference to it - is returned. 
- - If the JSON pointer points to an array index that does not exist, it - is created an filled with a `null` value before a reference to it - is returned. All indices between the current maximum and the given - index are also filled with `null`. - - The special value `-` is treated as a synonym for the index past the - end. - - @param[in] ptr a JSON pointer - - @return reference to the element pointed to by @a ptr - - @complexity Constant. - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.404 if the JSON pointer can not be resolved - - @liveexample{The behavior is shown in the example.,operatorjson_pointer} - - @since version 2.0.0 - */ - reference operator[](const json_pointer& ptr) - { - return ptr.get_unchecked(this); - } - - /*! - @brief access specified element via JSON Pointer - - Uses a JSON pointer to retrieve a reference to the respective JSON value. - No bound checking is performed. The function does not change the JSON - value; no `null` values are created. In particular, the special value - `-` yields an exception. - - @param[in] ptr JSON pointer to the desired element - - @return const reference to the element pointed to by @a ptr - - @complexity Constant. - - @throw parse_error.106 if an array index begins with '0' - @throw parse_error.109 if an array index was not a number - @throw out_of_range.402 if the array index '-' is used - @throw out_of_range.404 if the JSON pointer can not be resolved - - @liveexample{The behavior is shown in the example.,operatorjson_pointer_const} - - @since version 2.0.0 - */ - const_reference operator[](const json_pointer& ptr) const - { - return ptr.get_unchecked(this); - } - - /*! - @brief access specified element via JSON Pointer - - Returns a reference to the element at with specified JSON pointer @a ptr, - with bounds checking. 
- - @param[in] ptr JSON pointer to the desired element - - @return reference to the element pointed to by @a ptr - - @throw parse_error.106 if an array index in the passed JSON pointer @a ptr - begins with '0'. See example below. - - @throw parse_error.109 if an array index in the passed JSON pointer @a ptr - is not a number. See example below. - - @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr - is out of range. See example below. - - @throw out_of_range.402 if the array index '-' is used in the passed JSON - pointer @a ptr. As `at` provides checked access (and no elements are - implicitly inserted), the index '-' is always invalid. See example below. - - @throw out_of_range.403 if the JSON pointer describes a key of an object - which cannot be found. See example below. - - @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. - See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @since version 2.0.0 - - @liveexample{The behavior is shown in the example.,at_json_pointer} - */ - reference at(const json_pointer& ptr) - { - return ptr.get_checked(this); - } - - /*! - @brief access specified element via JSON Pointer - - Returns a const reference to the element at with specified JSON pointer @a - ptr, with bounds checking. - - @param[in] ptr JSON pointer to the desired element - - @return reference to the element pointed to by @a ptr - - @throw parse_error.106 if an array index in the passed JSON pointer @a ptr - begins with '0'. See example below. - - @throw parse_error.109 if an array index in the passed JSON pointer @a ptr - is not a number. See example below. - - @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr - is out of range. See example below. - - @throw out_of_range.402 if the array index '-' is used in the passed JSON - pointer @a ptr. 
As `at` provides checked access (and no elements are - implicitly inserted), the index '-' is always invalid. See example below. - - @throw out_of_range.403 if the JSON pointer describes a key of an object - which cannot be found. See example below. - - @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved. - See example below. - - @exceptionsafety Strong guarantee: if an exception is thrown, there are no - changes in the JSON value. - - @complexity Constant. - - @since version 2.0.0 - - @liveexample{The behavior is shown in the example.,at_json_pointer_const} - */ - const_reference at(const json_pointer& ptr) const - { - return ptr.get_checked(this); - } - - /*! - @brief return flattened JSON value - - The function creates a JSON object whose keys are JSON pointers (see [RFC - 6901](https://tools.ietf.org/html/rfc6901)) and whose values are all - primitive. The original JSON value can be restored using the @ref - unflatten() function. - - @return an object that maps JSON pointers to primitive values - - @note Empty objects and arrays are flattened to `null` and will not be - reconstructed correctly by the @ref unflatten() function. - - @complexity Linear in the size the JSON value. - - @liveexample{The following code shows how a JSON object is flattened to an - object whose keys consist of JSON pointers.,flatten} - - @sa @ref unflatten() for the reverse function - - @since version 2.0.0 - */ - basic_json flatten() const - { - basic_json result(value_t::object); - json_pointer::flatten("", *this, result); - return result; - } - - /*! - @brief unflatten a previously flattened JSON value - - The function restores the arbitrary nesting of a JSON value that has been - flattened before using the @ref flatten() function. The JSON value must - meet certain constraints: - 1. The value must be an object. - 2. The keys must be JSON pointers (see - [RFC 6901](https://tools.ietf.org/html/rfc6901)) - 3. The mapped values must be primitive JSON types. 
- - @return the original JSON from a flattened version - - @note Empty objects and arrays are flattened by @ref flatten() to `null` - values and can not unflattened to their original type. Apart from - this example, for a JSON value `j`, the following is always true: - `j == j.flatten().unflatten()`. - - @complexity Linear in the size the JSON value. - - @throw type_error.314 if value is not an object - @throw type_error.315 if object values are not primitive - - @liveexample{The following code shows how a flattened JSON object is - unflattened into the original nested JSON object.,unflatten} - - @sa @ref flatten() for the reverse function - - @since version 2.0.0 - */ - basic_json unflatten() const - { - return json_pointer::unflatten(*this); - } - - /// @} - - ////////////////////////// - // JSON Patch functions // - ////////////////////////// - - /// @name JSON Patch functions - /// @{ - - /*! - @brief applies a JSON patch - - [JSON Patch](http://jsonpatch.com) defines a JSON document structure for - expressing a sequence of operations to apply to a JSON) document. With - this function, a JSON Patch is applied to the current JSON value by - executing all operations from the patch. - - @param[in] json_patch JSON patch document - @return patched document - - @note The application of a patch is atomic: Either all operations succeed - and the patched document is returned or an exception is thrown. In - any case, the original value is not changed: the patch is applied - to a copy of the value. - - @throw parse_error.104 if the JSON patch does not consist of an array of - objects - - @throw parse_error.105 if the JSON patch is malformed (e.g., mandatory - attributes are missing); example: `"operation add must have member path"` - - @throw out_of_range.401 if an array index is out of range. 
- - @throw out_of_range.403 if a JSON pointer inside the patch could not be - resolved successfully in the current JSON value; example: `"key baz not - found"` - - @throw out_of_range.405 if JSON pointer has no parent ("add", "remove", - "move") - - @throw other_error.501 if "test" operation was unsuccessful - - @complexity Linear in the size of the JSON value and the length of the - JSON patch. As usually only a fraction of the JSON value is affected by - the patch, the complexity can usually be neglected. - - @liveexample{The following code shows how a JSON patch is applied to a - value.,patch} - - @sa @ref diff -- create a JSON patch by comparing two JSON values - - @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) - @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901) - - @since version 2.0.0 - */ - basic_json patch(const basic_json& json_patch) const - { - // make a working copy to apply the patch to - basic_json result = *this; - - // the valid JSON Patch operations - enum class patch_operations {add, remove, replace, move, copy, test, invalid}; - - const auto get_op = [](const std::string & op) - { - if (op == "add") - { - return patch_operations::add; - } - if (op == "remove") - { - return patch_operations::remove; - } - if (op == "replace") - { - return patch_operations::replace; - } - if (op == "move") - { - return patch_operations::move; - } - if (op == "copy") - { - return patch_operations::copy; - } - if (op == "test") - { - return patch_operations::test; - } - - return patch_operations::invalid; - }; - - // wrapper for "add" operation; add value at ptr - const auto operation_add = [&result](json_pointer & ptr, basic_json val) - { - // adding to the root of the target document means replacing it - if (ptr.empty()) - { - result = val; - return; - } - - // make sure the top element of the pointer exists - json_pointer top_pointer = ptr.top(); - if (top_pointer != ptr) - { - result.at(top_pointer); - } - - // get reference to 
parent of JSON pointer ptr - const auto last_path = ptr.back(); - ptr.pop_back(); - basic_json& parent = result[ptr]; - - switch (parent.m_type) - { - case value_t::null: - case value_t::object: - { - // use operator[] to add value - parent[last_path] = val; - break; - } - - case value_t::array: - { - if (last_path == "-") - { - // special case: append to back - parent.push_back(val); - } - else - { - const auto idx = json_pointer::array_index(last_path); - if (JSON_HEDLEY_UNLIKELY(idx > parent.size())) - { - // avoid undefined behavior - JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range", parent)); - } - - // default case: insert add offset - parent.insert(parent.begin() + static_cast(idx), val); - } - break; - } - - // if there exists a parent it cannot be primitive - default: // LCOV_EXCL_LINE - JSON_ASSERT(false); // LCOV_EXCL_LINE - } - }; - - // wrapper for "remove" operation; remove value at ptr - const auto operation_remove = [this, &result](json_pointer & ptr) - { - // get reference to parent of JSON pointer ptr - const auto last_path = ptr.back(); - ptr.pop_back(); - basic_json& parent = result.at(ptr); - - // remove child - if (parent.is_object()) - { - // perform range check - auto it = parent.find(last_path); - if (JSON_HEDLEY_LIKELY(it != parent.end())) - { - parent.erase(it); - } - else - { - JSON_THROW(out_of_range::create(403, "key '" + last_path + "' not found", *this)); - } - } - else if (parent.is_array()) - { - // note erase performs range check - parent.erase(json_pointer::array_index(last_path)); - } - }; - - // type check: top level value must be an array - if (JSON_HEDLEY_UNLIKELY(!json_patch.is_array())) - { - JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects", json_patch)); - } - - // iterate and apply the operations - for (const auto& val : json_patch) - { - // wrapper to get a value for an operation - const auto get_value = [&val](const std::string & op, - const 
std::string & member, - bool string_type) -> basic_json & - { - // find value - auto it = val.m_value.object->find(member); - - // context-sensitive error message - const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'"; - - // check if desired value is present - if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end())) - { - JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'", val)); - } - - // check if result is of type string - if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string())) - { - JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'", val)); - } - - // no error: return value - return it->second; - }; - - // type check: every element of the array must be an object - if (JSON_HEDLEY_UNLIKELY(!val.is_object())) - { - JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects", val)); - } - - // collect mandatory members - const auto op = get_value("op", "op", true).template get(); - const auto path = get_value(op, "path", true).template get(); - json_pointer ptr(path); - - switch (get_op(op)) - { - case patch_operations::add: - { - operation_add(ptr, get_value("add", "value", false)); - break; - } - - case patch_operations::remove: - { - operation_remove(ptr); - break; - } - - case patch_operations::replace: - { - // the "path" location must exist - use at() - result.at(ptr) = get_value("replace", "value", false); - break; - } - - case patch_operations::move: - { - const auto from_path = get_value("move", "from", true).template get(); - json_pointer from_ptr(from_path); - - // the "from" location must exist - use at() - basic_json v = result.at(from_ptr); - - // The move operation is functionally identical to a - // "remove" operation on the "from" location, followed - // immediately by an "add" operation at the target - // location with the value that was just removed. 
- operation_remove(from_ptr); - operation_add(ptr, v); - break; - } - - case patch_operations::copy: - { - const auto from_path = get_value("copy", "from", true).template get(); - const json_pointer from_ptr(from_path); - - // the "from" location must exist - use at() - basic_json v = result.at(from_ptr); - - // The copy is functionally identical to an "add" - // operation at the target location using the value - // specified in the "from" member. - operation_add(ptr, v); - break; - } - - case patch_operations::test: - { - bool success = false; - JSON_TRY - { - // check if "value" matches the one at "path" - // the "path" location must exist - use at() - success = (result.at(ptr) == get_value("test", "value", false)); - } - JSON_INTERNAL_CATCH (out_of_range&) - { - // ignore out of range errors: success remains false - } - - // throw an exception if test fails - if (JSON_HEDLEY_UNLIKELY(!success)) - { - JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump(), val)); - } - - break; - } - - default: - { - // op must be "add", "remove", "replace", "move", "copy", or - // "test" - JSON_THROW(parse_error::create(105, 0, "operation value '" + op + "' is invalid", val)); - } - } - } - - return result; - } - - /*! - @brief creates a diff as a JSON patch - - Creates a [JSON Patch](http://jsonpatch.com) so that value @a source can - be changed into the value @a target by calling @ref patch function. - - @invariant For two JSON values @a source and @a target, the following code - yields always `true`: - @code {.cpp} - source.patch(diff(source, target)) == target; - @endcode - - @note Currently, only `remove`, `add`, and `replace` operations are - generated. - - @param[in] source JSON value to compare from - @param[in] target JSON value to compare against - @param[in] path helper value to create JSON pointers - - @return a JSON patch to convert the @a source to @a target - - @complexity Linear in the lengths of @a source and @a target. 
- - @liveexample{The following code shows how a JSON patch is created as a - diff for two JSON values.,diff} - - @sa @ref patch -- apply a JSON patch - @sa @ref merge_patch -- apply a JSON Merge Patch - - @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902) - - @since version 2.0.0 - */ - JSON_HEDLEY_WARN_UNUSED_RESULT - static basic_json diff(const basic_json& source, const basic_json& target, - const std::string& path = "") - { - // the patch - basic_json result(value_t::array); - - // if the values are the same, return empty patch - if (source == target) - { - return result; - } - - if (source.type() != target.type()) - { - // different types: replace value - result.push_back( - { - {"op", "replace"}, {"path", path}, {"value", target} - }); - return result; - } - - switch (source.type()) - { - case value_t::array: - { - // first pass: traverse common elements - std::size_t i = 0; - while (i < source.size() && i < target.size()) - { - // recursive call to compare array values at index i - auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i)); - result.insert(result.end(), temp_diff.begin(), temp_diff.end()); - ++i; - } - - // i now reached the end of at least one array - // in a second pass, traverse the remaining elements - - // remove my remaining elements - const auto end_index = static_cast(result.size()); - while (i < source.size()) - { - // add operations in reverse order to avoid invalid - // indices - result.insert(result.begin() + end_index, object( - { - {"op", "remove"}, - {"path", path + "/" + std::to_string(i)} - })); - ++i; - } - - // add other remaining elements - while (i < target.size()) - { - result.push_back( - { - {"op", "add"}, - {"path", path + "/-"}, - {"value", target[i]} - }); - ++i; - } - - break; - } - - case value_t::object: - { - // first pass: traverse this object's elements - for (auto it = source.cbegin(); it != source.cend(); ++it) - { - // escape the key name to be used in a JSON patch - const 
auto key = detail::escape(it.key()); - - if (target.find(it.key()) != target.end()) - { - // recursive call to compare object values at key it - auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key); - result.insert(result.end(), temp_diff.begin(), temp_diff.end()); - } - else - { - // found a key that is not in o -> remove it - result.push_back(object( - { - {"op", "remove"}, {"path", path + "/" + key} - })); - } - } - - // second pass: traverse other object's elements - for (auto it = target.cbegin(); it != target.cend(); ++it) - { - if (source.find(it.key()) == source.end()) - { - // found a key that is not in this -> add it - const auto key = detail::escape(it.key()); - result.push_back( - { - {"op", "add"}, {"path", path + "/" + key}, - {"value", it.value()} - }); - } - } - - break; - } - - default: - { - // both primitive type: replace value - result.push_back( - { - {"op", "replace"}, {"path", path}, {"value", target} - }); - break; - } - } - - return result; - } - - /// @} - - //////////////////////////////// - // JSON Merge Patch functions // - //////////////////////////////// - - /// @name JSON Merge Patch functions - /// @{ - - /*! - @brief applies a JSON Merge Patch - - The merge patch format is primarily intended for use with the HTTP PATCH - method as a means of describing a set of modifications to a target - resource's content. This function applies a merge patch to the current - JSON value. 
- - The function implements the following algorithm from Section 2 of - [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396): - - ``` - define MergePatch(Target, Patch): - if Patch is an Object: - if Target is not an Object: - Target = {} // Ignore the contents and set it to an empty Object - for each Name/Value pair in Patch: - if Value is null: - if Name exists in Target: - remove the Name/Value pair from Target - else: - Target[Name] = MergePatch(Target[Name], Value) - return Target - else: - return Patch - ``` - - Thereby, `Target` is the current object; that is, the patch is applied to - the current value. - - @param[in] apply_patch the patch to apply - - @complexity Linear in the lengths of @a patch. - - @liveexample{The following code shows how a JSON Merge Patch is applied to - a JSON document.,merge_patch} - - @sa @ref patch -- apply a JSON patch - @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396) - - @since version 3.0.0 - */ - void merge_patch(const basic_json& apply_patch) - { - if (apply_patch.is_object()) - { - if (!is_object()) - { - *this = object(); - } - for (auto it = apply_patch.begin(); it != apply_patch.end(); ++it) - { - if (it.value().is_null()) - { - erase(it.key()); - } - else - { - operator[](it.key()).merge_patch(it.value()); - } - } - } - else - { - *this = apply_patch; - } - } - - /// @} -}; - -/*! -@brief user-defined to_string function for JSON values - -This function implements a user-defined to_string for JSON objects. - -@param[in] j a JSON object -@return a std::string object -*/ - -NLOHMANN_BASIC_JSON_TPL_DECLARATION -std::string to_string(const NLOHMANN_BASIC_JSON_TPL& j) -{ - return j.dump(); -} -} // namespace nlohmann - -/////////////////////// -// nonmember support // -/////////////////////// - -// specialization of std::swap, and std::hash -namespace std -{ - -/// hash value for JSON objects -template<> -struct hash -{ - /*! 
- @brief return a hash value for a JSON object - - @since version 1.0.0 - */ - std::size_t operator()(const nlohmann::json& j) const - { - return nlohmann::detail::hash(j); - } -}; - -/// specialization for std::less -/// @note: do not remove the space after '<', -/// see https://github.com/nlohmann/json/pull/679 -template<> -struct less<::nlohmann::detail::value_t> -{ - /*! - @brief compare two value_t enum values - @since version 3.0.0 - */ - bool operator()(nlohmann::detail::value_t lhs, - nlohmann::detail::value_t rhs) const noexcept - { - return nlohmann::detail::operator<(lhs, rhs); - } -}; - -// C++20 prohibit function specialization in the std namespace. -#ifndef JSON_HAS_CPP_20 - -/*! -@brief exchanges the values of two JSON objects - -@since version 1.0.0 -*/ -template<> -inline void swap(nlohmann::json& j1, nlohmann::json& j2) noexcept( - is_nothrow_move_constructible::value&& - is_nothrow_move_assignable::value - ) -{ - j1.swap(j2); -} - -#endif - -} // namespace std - -/*! -@brief user-defined string literal for JSON values - -This operator implements a user-defined string literal for JSON objects. It -can be used by adding `"_json"` to a string literal and returns a JSON object -if no parse error occurred. - -@param[in] s a string representation of a JSON object -@param[in] n the length of string @a s -@return a JSON object - -@since version 1.0.0 -*/ -JSON_HEDLEY_NON_NULL(1) -inline nlohmann::json operator "" _json(const char* s, std::size_t n) -{ - return nlohmann::json::parse(s, s + n); -} - -/*! -@brief user-defined string literal for JSON pointer - -This operator implements a user-defined string literal for JSON Pointers. It -can be used by adding `"_json_pointer"` to a string literal and returns a JSON pointer -object if no parse error occurred. 
- -@param[in] s a string representation of a JSON Pointer -@param[in] n the length of string @a s -@return a JSON pointer object - -@since version 2.0.0 -*/ -JSON_HEDLEY_NON_NULL(1) -inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n) -{ - return nlohmann::json::json_pointer(std::string(s, n)); -} - -// #include - - -// restore GCC/clang diagnostic settings -#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) - #pragma GCC diagnostic pop -#endif -#if defined(__clang__) - #pragma GCC diagnostic pop -#endif - -// clean up -#undef JSON_ASSERT -#undef JSON_INTERNAL_CATCH -#undef JSON_CATCH -#undef JSON_THROW -#undef JSON_TRY -#undef JSON_PRIVATE_UNLESS_TESTED -#undef JSON_HAS_CPP_14 -#undef JSON_HAS_CPP_17 -#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION -#undef NLOHMANN_BASIC_JSON_TPL -#undef JSON_EXPLICIT - -// #include -#undef JSON_HEDLEY_ALWAYS_INLINE -#undef JSON_HEDLEY_ARM_VERSION -#undef JSON_HEDLEY_ARM_VERSION_CHECK -#undef JSON_HEDLEY_ARRAY_PARAM -#undef JSON_HEDLEY_ASSUME -#undef JSON_HEDLEY_BEGIN_C_DECLS -#undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE -#undef JSON_HEDLEY_CLANG_HAS_BUILTIN -#undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE -#undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE -#undef JSON_HEDLEY_CLANG_HAS_EXTENSION -#undef JSON_HEDLEY_CLANG_HAS_FEATURE -#undef JSON_HEDLEY_CLANG_HAS_WARNING -#undef JSON_HEDLEY_COMPCERT_VERSION -#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK -#undef JSON_HEDLEY_CONCAT -#undef JSON_HEDLEY_CONCAT3 -#undef JSON_HEDLEY_CONCAT3_EX -#undef JSON_HEDLEY_CONCAT_EX -#undef JSON_HEDLEY_CONST -#undef JSON_HEDLEY_CONSTEXPR -#undef JSON_HEDLEY_CONST_CAST -#undef JSON_HEDLEY_CPP_CAST -#undef JSON_HEDLEY_CRAY_VERSION -#undef JSON_HEDLEY_CRAY_VERSION_CHECK -#undef JSON_HEDLEY_C_DECL -#undef JSON_HEDLEY_DEPRECATED -#undef JSON_HEDLEY_DEPRECATED_FOR -#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL -#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ -#undef 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED -#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES -#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS -#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION -#undef JSON_HEDLEY_DIAGNOSTIC_POP -#undef JSON_HEDLEY_DIAGNOSTIC_PUSH -#undef JSON_HEDLEY_DMC_VERSION -#undef JSON_HEDLEY_DMC_VERSION_CHECK -#undef JSON_HEDLEY_EMPTY_BASES -#undef JSON_HEDLEY_EMSCRIPTEN_VERSION -#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK -#undef JSON_HEDLEY_END_C_DECLS -#undef JSON_HEDLEY_FLAGS -#undef JSON_HEDLEY_FLAGS_CAST -#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE -#undef JSON_HEDLEY_GCC_HAS_BUILTIN -#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE -#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE -#undef JSON_HEDLEY_GCC_HAS_EXTENSION -#undef JSON_HEDLEY_GCC_HAS_FEATURE -#undef JSON_HEDLEY_GCC_HAS_WARNING -#undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK -#undef JSON_HEDLEY_GCC_VERSION -#undef JSON_HEDLEY_GCC_VERSION_CHECK -#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE -#undef JSON_HEDLEY_GNUC_HAS_BUILTIN -#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE -#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE -#undef JSON_HEDLEY_GNUC_HAS_EXTENSION -#undef JSON_HEDLEY_GNUC_HAS_FEATURE -#undef JSON_HEDLEY_GNUC_HAS_WARNING -#undef JSON_HEDLEY_GNUC_VERSION -#undef JSON_HEDLEY_GNUC_VERSION_CHECK -#undef JSON_HEDLEY_HAS_ATTRIBUTE -#undef JSON_HEDLEY_HAS_BUILTIN -#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE -#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS -#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE -#undef JSON_HEDLEY_HAS_EXTENSION -#undef JSON_HEDLEY_HAS_FEATURE -#undef JSON_HEDLEY_HAS_WARNING -#undef JSON_HEDLEY_IAR_VERSION -#undef JSON_HEDLEY_IAR_VERSION_CHECK -#undef JSON_HEDLEY_IBM_VERSION -#undef JSON_HEDLEY_IBM_VERSION_CHECK -#undef JSON_HEDLEY_IMPORT -#undef JSON_HEDLEY_INLINE -#undef JSON_HEDLEY_INTEL_CL_VERSION -#undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK -#undef JSON_HEDLEY_INTEL_VERSION -#undef JSON_HEDLEY_INTEL_VERSION_CHECK -#undef JSON_HEDLEY_IS_CONSTANT -#undef 
JSON_HEDLEY_IS_CONSTEXPR_ -#undef JSON_HEDLEY_LIKELY -#undef JSON_HEDLEY_MALLOC -#undef JSON_HEDLEY_MCST_LCC_VERSION -#undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK -#undef JSON_HEDLEY_MESSAGE -#undef JSON_HEDLEY_MSVC_VERSION -#undef JSON_HEDLEY_MSVC_VERSION_CHECK -#undef JSON_HEDLEY_NEVER_INLINE -#undef JSON_HEDLEY_NON_NULL -#undef JSON_HEDLEY_NO_ESCAPE -#undef JSON_HEDLEY_NO_RETURN -#undef JSON_HEDLEY_NO_THROW -#undef JSON_HEDLEY_NULL -#undef JSON_HEDLEY_PELLES_VERSION -#undef JSON_HEDLEY_PELLES_VERSION_CHECK -#undef JSON_HEDLEY_PGI_VERSION -#undef JSON_HEDLEY_PGI_VERSION_CHECK -#undef JSON_HEDLEY_PREDICT -#undef JSON_HEDLEY_PRINTF_FORMAT -#undef JSON_HEDLEY_PRIVATE -#undef JSON_HEDLEY_PUBLIC -#undef JSON_HEDLEY_PURE -#undef JSON_HEDLEY_REINTERPRET_CAST -#undef JSON_HEDLEY_REQUIRE -#undef JSON_HEDLEY_REQUIRE_CONSTEXPR -#undef JSON_HEDLEY_REQUIRE_MSG -#undef JSON_HEDLEY_RESTRICT -#undef JSON_HEDLEY_RETURNS_NON_NULL -#undef JSON_HEDLEY_SENTINEL -#undef JSON_HEDLEY_STATIC_ASSERT -#undef JSON_HEDLEY_STATIC_CAST -#undef JSON_HEDLEY_STRINGIFY -#undef JSON_HEDLEY_STRINGIFY_EX -#undef JSON_HEDLEY_SUNPRO_VERSION -#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK -#undef JSON_HEDLEY_TINYC_VERSION -#undef JSON_HEDLEY_TINYC_VERSION_CHECK -#undef JSON_HEDLEY_TI_ARMCL_VERSION -#undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK -#undef JSON_HEDLEY_TI_CL2000_VERSION -#undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK -#undef JSON_HEDLEY_TI_CL430_VERSION -#undef JSON_HEDLEY_TI_CL430_VERSION_CHECK -#undef JSON_HEDLEY_TI_CL6X_VERSION -#undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK -#undef JSON_HEDLEY_TI_CL7X_VERSION -#undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK -#undef JSON_HEDLEY_TI_CLPRU_VERSION -#undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK -#undef JSON_HEDLEY_TI_VERSION -#undef JSON_HEDLEY_TI_VERSION_CHECK -#undef JSON_HEDLEY_UNAVAILABLE -#undef JSON_HEDLEY_UNLIKELY -#undef JSON_HEDLEY_UNPREDICTABLE -#undef JSON_HEDLEY_UNREACHABLE -#undef JSON_HEDLEY_UNREACHABLE_RETURN -#undef JSON_HEDLEY_VERSION -#undef 
JSON_HEDLEY_VERSION_DECODE_MAJOR -#undef JSON_HEDLEY_VERSION_DECODE_MINOR -#undef JSON_HEDLEY_VERSION_DECODE_REVISION -#undef JSON_HEDLEY_VERSION_ENCODE -#undef JSON_HEDLEY_WARNING -#undef JSON_HEDLEY_WARN_UNUSED_RESULT -#undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG -#undef JSON_HEDLEY_FALL_THROUGH - - - -#endif // INCLUDE_NLOHMANN_JSON_HPP_ From b0adf3e08e11c71fc5b2c594f491878a9ad18e22 Mon Sep 17 00:00:00 2001 From: Chin-Chang Yang <2770271+ChinChangYang@users.noreply.github.com> Date: Sun, 22 Mar 2026 22:04:51 +0800 Subject: [PATCH 34/34] Deduplicate abseil module list and document GLOB caveat in katagocoreml CMake Extract the abseil pkg-config module names into a shared variable to avoid maintaining the same list in two places. Add a comment on file(GLOB) noting that cmake must be re-run when proto files change. Co-Authored-By: Claude Opus 4.6 (1M context) --- cpp/external/katagocoreml/CMakeLists.txt | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cpp/external/katagocoreml/CMakeLists.txt b/cpp/external/katagocoreml/CMakeLists.txt index 38a543b9e..a3cdc9e16 100644 --- a/cpp/external/katagocoreml/CMakeLists.txt +++ b/cpp/external/katagocoreml/CMakeLists.txt @@ -15,17 +15,14 @@ find_package(ZLIB REQUIRED) find_package(Protobuf REQUIRED) # Needed for protoc executable and include dirs find_package(PkgConfig REQUIRED) -pkg_check_modules(KATAGOCOREML_ABSEIL REQUIRED +set(KATAGOCOREML_ABSEIL_MODULES absl_base absl_log absl_log_internal_check_op absl_log_internal_message absl_hash absl_strings absl_status absl_statusor ) +pkg_check_modules(KATAGOCOREML_ABSEIL REQUIRED ${KATAGOCOREML_ABSEIL_MODULES}) # Export link flags to parent scope for the final executable -pkg_check_modules(KATAGOCOREML_ALL_DEPS REQUIRED - protobuf - absl_base absl_log absl_log_internal_check_op absl_log_internal_message - absl_hash absl_strings absl_status absl_statusor -) +pkg_check_modules(KATAGOCOREML_ALL_DEPS REQUIRED protobuf ${KATAGOCOREML_ABSEIL_MODULES}) 
set(KATAGOCOREML_DEP_LDFLAGS ${KATAGOCOREML_ALL_DEPS_LDFLAGS} PARENT_SCOPE) # ============================================================================ @@ -37,7 +34,8 @@ set(PROTO_DIR "${COREMLTOOLS_ROOT}/mlmodel/format") set(PROTO_GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/proto") file(MAKE_DIRECTORY ${PROTO_GENERATED_DIR}) -# Get all proto files +# Get all proto files (GLOB does not re-run on file additions; re-run cmake +# manually if proto files are added or removed). file(GLOB PROTO_FILES "${PROTO_DIR}/*.proto") # Generate C++ from all proto files