2016-09-16 10:10:18 +02:00
|
|
|
/**
|
|
|
|
This file encapsulates the SDK-based task handling for the NodeMCU Lua firmware.
|
|
|
|
*/
|
|
|
|
#include "task/task.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
2016-09-20 05:35:56 +02:00
|
|
|
#include "freertos/FreeRTOS.h"
|
2016-09-16 10:10:18 +02:00
|
|
|
#include "freertos/queue.h"
|
|
|
|
#include "freertos/semphr.h"
|
|
|
|
|
|
|
|
#define TASK_HANDLE_MONIKER 0x68680000
|
|
|
|
#define TASK_HANDLE_MASK 0xFFF80000
|
|
|
|
#define TASK_HANDLE_UNMASK (~TASK_HANDLE_MASK)
|
|
|
|
#define TASK_HANDLE_ALLOCATION_BRICK 4 // must be a power of 2
|
|
|
|
|
|
|
|
#define CHECK(p,v,msg) if (!(p)) { NODE_DBG ( msg ); return (v); }
|
|
|
|
|
|
|
|
#ifndef NODE_DBG
|
|
|
|
# define NODE_DBG(...) do{}while(0)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* One queued event: the destination task handle plus the opaque parameter
 * that is passed through unchanged to the registered callback. */
typedef struct
{
  task_handle_t sig;  /* task handle (TASK_HANDLE_MONIKER | callback index) */
  task_param_t par;   /* user parameter forwarded to the callback */
} task_event_t;
|
|
|
|
|
|
|
|
/*
 * Private arrays to hold the 3 event task queues and the dispatch callbacks
 */
static xQueueHandle task_Q[TASK_PRIORITY_COUNT];

/* Rather than using a QueueSet (which requires queues to be empty when created)
 * we use a binary semaphore to unblock the pump whenever something is posted */
static xSemaphoreHandle pending;

/* Registered callback table, indexed by the low bits of a task handle.
 * Grown on demand by task_get_id() in TASK_HANDLE_ALLOCATION_BRICK steps;
 * entries are never freed or reused. */
static task_callback_t *task_func;
/* Number of handles handed out so far (== used entries in task_func). */
static int task_count;
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Register a task callback and return its handle.
 *
 * The handle is the callback's index in task_func OR'd with
 * TASK_HANDLE_MONIKER so dispatch() can reject stray values. The table
 * grows in bricks of TASK_HANDLE_ALLOCATION_BRICK entries; handles are
 * never reused. Returns 0 on allocation failure.
 */
task_handle_t task_get_id(task_callback_t t) {
  if ( (task_count & (TASK_HANDLE_ALLOCATION_BRICK - 1)) == 0 ) {
    /* With a brick size of 4 this branch is taken at 0, 4, 8 ... and the
     * new size is +4. Grow via a temporary pointer: assigning realloc's
     * result straight to task_func would, on failure, both leak the old
     * table and wipe out every previously registered callback. */
    task_callback_t *grown = (task_callback_t *)realloc(
      task_func,
      sizeof(task_callback_t)*(task_count+TASK_HANDLE_ALLOCATION_BRICK));

    CHECK(grown, 0 , "Malloc failure in task_get_id");
    task_func = grown;
    memset (task_func+task_count, 0, sizeof(task_callback_t)*TASK_HANDLE_ALLOCATION_BRICK);
  }

  task_func[task_count] = t;
  return TASK_HANDLE_MONIKER | task_count++;
}
|
|
|
|
|
|
|
|
|
2021-07-26 07:36:57 +02:00
|
|
|
/* Queue an event for the given task handle at the given priority.
 * Safe to call from RTOS task context (for ISRs, use task_post_isr).
 * Returns false for an out-of-range priority, a handle that lacks the
 * moniker, or a full queue; does not block. */
bool IRAM_ATTR task_post(task_prio_t priority, task_handle_t handle, task_param_t param)
{
  bool bad_priority = (priority >= TASK_PRIORITY_COUNT);
  bool bad_handle = ((handle & TASK_HANDLE_MASK) != TASK_HANDLE_MONIKER);
  if (bad_priority || bad_handle)
    return false;

  task_event_t evt;
  evt.sig = handle;
  evt.par = param;
  BaseType_t sent = xQueueSendToBack(task_Q[priority], &evt, 0);

  /* Always nudge the pump; a spurious wake-up is harmless. */
  xSemaphoreGive(pending);

  return sent == pdPASS;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* ISR-safe variant of task_post(): uses the FromISR queue/semaphore APIs
 * and requests a context switch if waking the pump unblocked a
 * higher-priority task. Returns false on bad arguments or a full queue. */
bool IRAM_ATTR task_post_isr(task_prio_t priority, task_handle_t handle, task_param_t param)
{
  if ((handle & TASK_HANDLE_MASK) != TASK_HANDLE_MONIKER)
    return false;
  if (priority >= TASK_PRIORITY_COUNT)
    return false;

  task_event_t evt = { .sig = handle, .par = param };
  bool queued =
    (xQueueSendToBackFromISR (task_Q[priority], &evt, NULL) == pdPASS);

  BaseType_t higher_prio_woken = pdFALSE;
  xSemaphoreGiveFromISR (pending, &higher_prio_woken);
  portYIELD_FROM_ISR(higher_prio_woken);

  return queued;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Dequeue the highest-priority pending event without blocking.
 * On success fills *ev and *prio and returns true; returns false when
 * every queue is empty (or not yet created). */
static bool next_event (task_event_t *ev, task_prio_t *prio)
{
  /* Scan from the highest priority down to TASK_PRIORITY_LOW. */
  for (int p = (int)TASK_PRIORITY_COUNT - 1; p >= (int)TASK_PRIORITY_LOW; --p)
  {
    if (!task_Q[p])
      continue;
    if (xQueueReceive (task_Q[p], ev, 0) != pdTRUE)
      continue;
    *prio = (task_prio_t)p;
    return true;
  }
  return false; /* no events queued at any priority */
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Invoke the registered callback for a dequeued event.
 *
 * The handle must carry TASK_HANDLE_MONIKER and index an already
 * registered slot in task_func; anything else is logged and dropped.
 */
static void dispatch (task_event_t *e, uint8_t prio) {
  task_handle_t handle = e->sig;
  if ( (handle & TASK_HANDLE_MASK) == TASK_HANDLE_MONIKER) {
    /* TASK_HANDLE_UNMASK admits a 19-bit index, so keep the full value
     * rather than truncating it into a uint16_t. */
    uint32_t entry = (handle & TASK_HANDLE_UNMASK);
    if ( task_func && entry < task_count ){
      /* call the registered task handler with the specified parameter and priority */
      task_func[entry](e->par, prio);
      return;
    }
  }
  /* Invalid signals are ignored */
  NODE_DBG ( "Invalid signal issued: %08x", handle);
}
|
|
|
|
|
|
|
|
|
Fix net module data loss & RTOS task unsafety (#2829)
To avoid races between the lwIP callbacks (lwIP RTOS task) and the Lua
handlers (LVM RTOS task), the data flow and ownership has been simplified
and cleaned up.
lwIP callbacks now have no visibility of the userdata struct. They are
limited to creating small event objects and task_post()ing them over
to the LVM "thread", passing ownership in doing so. The shared identifier
then becomes the struct netconn*.
On the LVM side, we keep a linked list of active userdata objects. This
allows us to retrieve the correct userdata when we get an event with
a netconn pointer. Because this list is only ever used within the LVM
task, no locking is necessary.
The old approach of stashing a userdata pointer into the 'socket' field
on the netconn has been removed entirely, as this was both not
thread/RTOS-task safe, and also interfered with the IDFs internal use
of the socket field (even when using only the netconn layer). As an
added benefit, this removed the need for all the SYS_ARCH_PROTECT()
locking stuff.
The need to track receive events before the corresponding userdata object
has been established has been removed by virtue of not reordering the
"accept" and the "recv" events any more (previously accepts were posted
with medium priority, while the receives where high priority, leading
to the observed reordering and associated headaches).
The workaround for IDF issue 784 has been removed as it is now not needed
and is in fact directly harmful as it results in a double-free. Yay for
getting rid of old workarounds!
DNS resolution code paths were merged for the two instances of "socket"
initiated resolves (connect/dns functions).
Also fixed an instance of using a stack variable for receiving the resolved
IP address, with said variable going out of scope before the DNS resolution
necessarily completed (hello, memory corruption!).
Where possible, moved to use the Lua allocator rather than plain malloc.
Finally, the NodeMCU task posting mechanism got a polish and an adjustment.
Given all the Bad(tm) that tends to happen if something fails task posting,
I went through a couple of iterations on how to avoid that. Alas, the
preferred solution of blocking non-LVM RTOS tasks until a slot is free
turned out to not be viable, as this easily resulted in deadlocks with the
lwIP stack. After much deliberation I settled on increasing the number of
available queue slots for the task_post() mechanism, but in the interest
of user control also now made it user configurable via Kconfig.
2019-07-14 23:20:20 +02:00
|
|
|
void task_init (void)
|
|
|
|
{
|
|
|
|
pending = xSemaphoreCreateBinary ();
|
|
|
|
|
|
|
|
// Due to the nature of the RTOS, if we ever fail to do a task_post, we're
|
|
|
|
// up the proverbial creek without a paddle. Each queue slot only costs us
|
|
|
|
// 8 bytes, so it's a worthwhile trade-off to reserve a bit of RAM for this
|
|
|
|
// purpose. Trying to work around the issue in the places which post ends
|
|
|
|
// up being *at least* as bad, so better take the hit where the benefit
|
|
|
|
// is shared. That said, we let the user have the final say.
|
|
|
|
const size_t q_mem = CONFIG_NODEMCU_TASK_SLOT_MEMORY;
|
|
|
|
|
|
|
|
// Rather than blindly sizing everything the same, we try to be a little
|
|
|
|
// bit aware of the typical uses of the queues.
|
|
|
|
const size_t slice = q_mem / (10 * sizeof(task_event_t));
|
|
|
|
static const int qlens[] = { 1 * slice, 5 * slice, 4 * slice };
|
|
|
|
|
|
|
|
for (task_prio_t p = TASK_PRIORITY_LOW; p != TASK_PRIORITY_COUNT; ++p)
|
|
|
|
task_Q[p] = xQueueCreate (qlens[p], sizeof (task_event_t));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-09-16 10:10:18 +02:00
|
|
|
void task_pump_messages (void)
|
|
|
|
{
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
task_event_t ev;
|
|
|
|
task_prio_t prio;
|
|
|
|
if (next_event (&ev, &prio))
|
|
|
|
dispatch (&ev, prio);
|
|
|
|
else
|
|
|
|
xSemaphoreTake (pending, portMAX_DELAY);
|
|
|
|
}
|
|
|
|
}
|