used chunked data transfer
parent ec66df4549
commit 3710591edd
@@ -31,7 +31,7 @@ bool WebApiDatabaseClass::write(float energy)
 
     // LittleFS.remove(DATABASE_FILENAME);
 
-    //MessageOutput.println(energy, 6);
+    // MessageOutput.println(energy, 6);
 
     struct tm timeinfo;
     if (!getLocalTime(&timeinfo, 5)) {
@@ -72,7 +72,7 @@ bool WebApiDatabaseClass::write(float energy)
     }
     f.write((const uint8_t*)&d, sizeof(Data));
     f.close();
-    //MessageOutput.println("Write data point.");
+    // MessageOutput.println("Write data point.");
     return (true);
 }
 
@@ -82,34 +82,52 @@ void WebApiDatabaseClass::onDatabase(AsyncWebServerRequest* request)
         return;
     }
 
-    try {
-        File f = LittleFS.open(DATABASE_FILENAME, "r", false);
-        if (!f) {
-            MessageOutput.println("Failed to read database.");
-            request->send(400, "text/plain", "Failed to read database.");
-            return;
-        }
-
-        struct Data d;
-
-        AsyncJsonResponse* response = new AsyncJsonResponse(true, 40000U);
-        JsonArray root = response->getRoot();
-
-        while (f.read((uint8_t*)&d, sizeof(Data))) { // read from database
-            JsonArray nested = root.createNestedArray(); // create new nested array and copy data to array
-            nested.add(d.tm_year);
-            nested.add(d.tm_mon);
-            nested.add(d.tm_mday);
-            nested.add(d.tm_hour);
-            nested.add(d.energy);
-        }
-        f.close();
-        response->setLength();
-        request->send(response);
-    } catch (std::bad_alloc& bad_alloc) {
-        MessageOutput.printf("Call to /api/database temporarely out of resources. Reason: \"%s\".\r\n", bad_alloc.what());
-        WebApi.sendTooManyRequests(request);
-    }
+    AsyncWebServerResponse* response = request->beginChunkedResponse("application/json",
+        [](uint8_t* buffer, size_t maxLen, size_t index) -> size_t {
+            static bool first = true;
+            static bool last = false;
+            static File f;
+            uint8_t* pr = buffer;
+            uint8_t* pre = pr + maxLen - 30;
+            size_t r;
+            struct Data d;
+
+            if (first) {
+                f = LittleFS.open(DATABASE_FILENAME, "r", false);
+                if (!f) {
+                    return (0);
+                }
+                *pr++ = '[';
+            }
+            while(true) {
+                r = f.read((uint8_t*)&d, sizeof(Data)); // read from database
+                if (r <= 0) {
+                    if (last) {
+                        f.close();
+                        first = true;
+                        last = false;
+                        return (0); // end transmission
+                    }
+                    last = true;
+                    *pr++ = ']';
+                    return (pr - buffer); // last chunk
+                }
+                if (first) {
+                    first = false;
+                } else {
+                    *pr++ = ',';
+                }
+                int len = sprintf((char*)pr, "[%d,%d,%d,%d,%f]",
+                    d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.energy);
+                if (len >= 0) {
+                    pr += len;
+                }
+                if (pr >= pre)
+                    return (pr - buffer); // buffer full, return number of chars
+            }
+        });
+
+    request->send(response);
 }
 
 /* JS
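For context on the pattern introduced above: ESPAsyncWebServer's beginChunkedResponse takes a filler callback of the form size_t(uint8_t* buffer, size_t maxLen, size_t index) that is invoked repeatedly until it returns 0; index is the number of bytes already delivered. Below is a minimal, self-contained sketch of that contract, independent of this commit; the route "/api/demo", registerDemoRoute, and the static payload are illustrative assumptions only.

// Minimal sketch of the chunked-response contract, assuming a standard
// ESP32 + ESPAsyncWebServer setup. Route and payload are hypothetical.
#include <cstring>
#include <ESPAsyncWebServer.h>

static const char payload[] = "[[123,5,28,12,1.234567],[123,5,28,13,1.345678]]";

void registerDemoRoute(AsyncWebServer& server)
{
    server.on("/api/demo", HTTP_GET, [](AsyncWebServerRequest* request) {
        AsyncWebServerResponse* response = request->beginChunkedResponse("application/json",
            [](uint8_t* buffer, size_t maxLen, size_t index) -> size_t {
                // index = bytes already sent; copy at most maxLen of what
                // remains. Returning 0 terminates the response.
                size_t remaining = sizeof(payload) - 1 - index;
                size_t n = remaining < maxLen ? remaining : maxLen;
                memcpy(buffer, payload + index, n);
                return n;
            });
        request->send(response);
    });
}

Unlike this sketch, the commit's callback keeps File f and the first/last flags as function-local statics rather than deriving everything from index, which presumably assumes only one /api/database transfer runs at a time; the maxLen - 30 slack appears to reserve room for one formatted "[%d,%d,%d,%d,%f]" record plus delimiters before the buffer-full check triggers.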