/*
 * Handle ring buffer link between ESP32 and FPGA
 * This implements the ESP32 (upstream/active/initiator/master) side.
 */
#define MODULE "esplink"

#include "common.h"
#include "esplink.h"
#include "fpga.h"
#include "boardinfo_esp.h"

struct esplink_ringbuf_ptrs {
	const volatile struct esplink_ptrs_dstr *d;
	volatile struct esplink_ptrs_ustr *u;
};

struct esplink_sem {
	SemaphoreHandle_t lock;
};

/*
 * Event group indicating which ring buffers are ready for I/O
 */
EventGroupHandle_t esplink_filled;	/* Data requested in "fill" ready */

static struct {
	volatile unsigned int ready;
	bool shutdown;
	SemaphoreHandle_t mutex;	/* Configuration mutex */
	struct esplink_ringbuf_ptrs rb;
	struct esplink_ringbuf_desc *desc;
	struct esplink_ringbuf_head head;
	struct esplink_sem sem[EL_RB_COUNT][2];
	size_t need[EL_RB_COUNT][2];	/* Amount of data requested for wakeup */
} elink;

/* Leave at least this much space, to preserve alignment */
#define RINGBUF_UNUSABLE 4

#define ELQUEUE_ALL_MASK ((1UL << (EL_RB_COUNT*2)) - 1)
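
/*
 * Sketch of the index convention assumed by the arithmetic below
 * (illustrative only; see esplink.h for the actual structure and
 * protocol definitions): ring sizes are powers of two, and the
 * free-running head/tail indices wrap by masking with (size - 1).
 * The producer advances "head", the consumer advances "tail":
 *
 *	avail = (head - tail) & (size - 1);	// bytes ready to read
 *	space = (tail - head) & (size - 1);	// room for the writer, of
 *						// which RINGBUF_UNUSABLE
 *						// bytes are kept free
 */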

static void esplink_stop(void)
{
	elink.ready = 0;

	/* Don't wake up waiters that only want online wakeups */
	xEventGroupClearBits(esplink_filled, ELWAIT_ONLINE);
	/* Wake up all the internal waiters or others with online = false */
	xEventGroupSetBits(esplink_filled, ELQUEUE_ALL_MASK);

	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		xSemaphoreTake(s->lock, portMAX_DELAY);
		s++;
	}
}

/* This must be called and completed before any other esplink functions! */
void esplink_init(void)
{
	elink.mutex = null_check(xSemaphoreCreateMutex());
	esplink_filled = null_check(xEventGroupCreate());

	elink.desc = xmalloc_dma(EL_RB_COUNT * sizeof *elink.desc);
	elink.rb.u = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.u);
	elink.rb.d = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.d);

	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		s->lock = null_check(xSemaphoreCreateBinary());
		s++;
	}

	xSemaphoreGive(elink.mutex);
}

/*
 * This needs to be called from the FPGA service thread once before
 * any esplink functions can be called from any other thread. After that,
 * those functions are safe to call (but may fail) regardless of
 * shutdown and/or reinitialization of the link.
 *
 * Call this function with head == NULL to either shut the link down
 * or to initialize the data structures (see above).
 */
void esplink_start(const struct esplink_head *head)
{
	struct fpga_iov iov[4];
	static unsigned int gen_count;
	static bool started = false;

	xSemaphoreTake(elink.mutex, portMAX_DELAY);

	if (elink.ready)
		esplink_stop();

	if (!head)
		goto shutdown;

	/* At this point elink.mutex and all the ->lock mutexes are held */

	elink.head = head->rb;
	elink.head.count = Min(head->rb.count, EL_RB_COUNT);

	size_t desc_size = sizeof(*elink.desc) * elink.head.count;
	size_t dptr_size = sizeof(*elink.rb.d) * elink.head.count;

	/* Set wakeup thresholds */
	for (size_t i = 0; i < elink.head.count; i++) {
		elink.need[i][0] = RINGBUF_UNUSABLE + 1;
		elink.need[i][1] = 1;
	}

	iov[0].cmd = FPGA_CMD_RD;
	iov[0].addr = elink.head.desc;
	iov[0].rdata = (void *)elink.desc;
	iov[0].len = desc_size;

	iov[1].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
	iov[1].addr = elink.head.dstr;
	iov[1].rdata = (void *)elink.rb.d;
	iov[1].len = dptr_size;

	/* Write back the same pointer values -> all buffers currently empty */
	iov[2].cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
	iov[2].addr = elink.head.ustr;
	iov[2].wdata = (void *)elink.rb.d; /* rb.d is correct */
	iov[2].len = dptr_size;

	/* Update board_info on the FPGA */
	iov[3].cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_BOARDINFO);
	iov[3].addr = head->board_info;
	iov[3].wdata = &board_info;
	iov[3].len = sizeof board_info;

	fpga_iov(iov, ARRAY_SIZE(iov));

	memcpy((void *)elink.rb.u, (void *)elink.rb.d, dptr_size);

	elink.ready = ++gen_count;

	xEventGroupClearBits(esplink_filled, ELQUEUE_ALL_MASK);

	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		xSemaphoreGive(s->lock);
		s++;
	}

	xEventGroupSetBits(esplink_filled, ELWAIT_ONLINE);

shutdown:
	xSemaphoreGive(elink.mutex);
}
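
/*
 * Sketch of the assumed call sequence from the FPGA service thread
 * (illustrative only; the surrounding service loop and the way the
 * esplink_head is obtained are outside this file):
 *
 *	esplink_init();			// once, before anything else
 *	...
 *	// when the FPGA comes up and its esplink_head has been read:
 *	esplink_start(&head);		// link online, waiters released
 *	...
 *	// on each EL_UIRQ_RINGBUF ring buffer interrupt:
 *	esplink_poll();
 *	...
 *	// on FPGA reconfiguration or shutdown:
 *	esplink_start(NULL);		// stop the link, wake blocked I/O
 */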

/*
 * Called from the FPGA service thread when a ring buffer
 * interrupt is received
 */
void esplink_poll(void)
{
	if (!elink.ready)
		return;

	xSemaphoreTake(elink.mutex, portMAX_DELAY);

	if (elink.ready) {
		const size_t count = elink.head.count;
		const size_t dptr_size = sizeof(*elink.rb.d) * count;
		struct fpga_iov iov[1];

		iov[0].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
		iov[0].addr = elink.head.dstr;
		iov[0].rdata = (void *)elink.rb.d;
		iov[0].len = dptr_size;

		fpga_iov(iov, 1);

		EventBits_t wakeup = 0;
		EventBits_t tbit = 1;

		for (size_t i = 0; i < count; i++) {
			size_t need;

			/* Enough free space in the downstream ring for a waiting writer? */
			need = atomic(elink.need[i][0]);
			if (((elink.rb.d[i].tail - atomic(elink.rb.u[i].head) - 1) &
			     (elink.desc[i].dstr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;

			/* Enough data in the upstream ring for a waiting reader? */
			need = atomic(elink.need[i][1]);
			if (((elink.rb.d[i].head - atomic(elink.rb.u[i].tail)) &
			     (elink.desc[i].ustr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;
		}

		xEventGroupSetBits(esplink_filled, wakeup);
	}

	xSemaphoreGive(elink.mutex);
}

/* ------------------------------------------------------------------------- *
 * Functions that can be called from a non-service thread.
 * ------------------------------------------------------------------------- */

/*
 * Write/read data to/from a ring buffer. Block if necessary until at least
 * <mintx>/<minrx> bytes have been sent/received, otherwise return.
 *
 * Returns the number of bytes sent/received.
 *
 * If <atomic> is set, only advance the head/tail pointer after the
 * whole transaction has been performed (the transaction must be no larger
 * than half the ring buffer size). If <mintx>/<minrx> < <len> and the return
 * value is less than <len>, then the pointer will NOT have been advanced
 * and the transaction was aborted. A new call will restart from the
 * previous pointer location.
 *
 * The wakeup "need" value in esplink_wait_for() is set appropriately
 * for a transaction of the same size, depending on whether the <atomic>
 * flag was set. Thus, for an atomic transaction, if a shorter atomic
 * or non-atomic transaction is then desired, it may be necessary to
 * issue it without waiting for esplink_wait_for().
 */
size_t esplink_write(enum esplink_ringbuf_user ring, const void *data,
		     size_t len, size_t mintx, bool atomic)
{
	const size_t unusable = RINGBUF_UNUSABLE;
	size_t tx = 0;

	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return tx;

	mintx = Min(mintx, len);

	const char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][0];

	xSemaphoreTake(sem->lock, portMAX_DELAY);

	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;

	const struct esplink_ringbuf * const desc = &elink.desc[ring].dstr;
	const size_t size = desc->size;

	if (unlikely(atomic && len > (size >> 1)))
		goto bail;

	size_t * const hptr = (size_t *)&elink.rb.u[ring].head;
	const volatile size_t * const tptr = &elink.rb.d[ring].tail;
	size_t head = *hptr;
	const size_t need = (atomic ? len : 1) + unusable;
	atomic(elink.need[ring][0]) = need; /* Minimum wakeup */
	char * const start = desc->start;

	while (elink.ready == ready_gen) {
		xEventGroupClearBits(esplink_filled, ELQUEUE_DL(ring));

		const size_t tail = *tptr;
		size_t space = (tail-head) & (size-1);

		if (!len) {
			if (space >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_DL(ring));
			break;
		}

		if (space < need) {
			if (tx >= mintx)
				break;

			esplink_wait_for(ELQUEUE_DL(ring), false);
			continue;
		}

		size_t chunk = Min(space - unusable, len);
		struct fpga_iov iov[4], *iv = iov;

		while (chunk) {
			iv->cmd = FPGA_CMD_WR;
			iv->addr = start + head;
			iv->wdata = p;
			iv->len = Min(chunk, size - head);
			p += iv->len;
			tx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			head = (head+iv->len) & (size-1);
			iv++;
		}

		if (!len || !atomic) {
			/* Commit the data to the ring buffer */
			elink.rb.u[ring].head = head;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			iv->addr = &elink.head.ustr[ring].head;
			iv->wdata = hptr;
			iv->len = sizeof *hptr;
			iv++;
		}

		/* Optimistically poll for tail pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].tail;
		iv->rdata = (void *)tptr;
		iv->len = sizeof *tptr;
		iv++;

		fpga_iov(iov, iv - iov);
	}

bail:
	xSemaphoreGive(sem->lock);
	return tx;
}
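
/*
 * Illustrative use of esplink_write() (a sketch only; the ring name
 * EL_RB_CONSOLE and the message are hypothetical, not defined here):
 *
 *	static const char msg[] = "hello from the ESP32\r\n";
 *
 *	// Block until the whole message has been queued to the FPGA...
 *	size_t sent = esplink_write(EL_RB_CONSOLE, msg, sizeof msg - 1,
 *				    sizeof msg - 1, false);
 *
 *	// ...or push it as one unit: with atomic = true the head pointer
 *	// only advances once everything fits (len <= size/2 required).
 *	sent = esplink_write(EL_RB_CONSOLE, msg, sizeof msg - 1,
 *			     sizeof msg - 1, true);
 *
 * A return value shorter than the requested length typically means the
 * link went down or the transaction was aborted, as described above.
 */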

size_t esplink_read(enum esplink_ringbuf_user ring, void *data,
		    size_t len, size_t minrx, bool atomic)
{
	size_t rx = 0;

	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return rx;

	minrx = Min(minrx, len);

	char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][1];

	xSemaphoreTake(sem->lock, portMAX_DELAY);

	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;

	const struct esplink_ringbuf * const desc = &elink.desc[ring].ustr;
	const size_t size = desc->size;

	if (unlikely(atomic && len > (size >> 1)))
		goto bail;

	size_t * const tptr = (size_t *)&elink.rb.u[ring].tail;
	const volatile size_t * const hptr = &elink.rb.d[ring].head;
	size_t tail = *tptr;
	const size_t need = atomic ? len : 1;
	atomic(elink.need[ring][1]) = need; /* Minimum wakeup */
	char * const start = desc->start;

	while (elink.ready == ready_gen) {
		xEventGroupClearBits(esplink_filled, ELQUEUE_UL(ring));

		const size_t head = *hptr;
		size_t avail = (head-tail) & (size-1);

		if (!len) {
			if (avail >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_UL(ring));
			break;
		}

		if (avail < need) {
			if (rx >= minrx)
				break;

			esplink_wait_for(ELQUEUE_UL(ring), false);
			continue;
		}

		size_t chunk = Min(avail, len);
		struct fpga_iov iov[4], *iv = iov;

		while (chunk) {
			iv->cmd = FPGA_CMD_RD;
			iv->addr = start + tail;
			iv->rdata = p;
			iv->len = Min(chunk, size - tail);
			p += iv->len;
			rx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			tail = (tail+iv->len) & (size-1);
			iv++;
		}

		if (!len || !atomic) {
			/* Consume the data from the ring buffer */
			elink.rb.u[ring].tail = tail;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			iv->addr = &elink.head.ustr[ring].tail;
			iv->wdata = tptr;
			iv->len = sizeof *tptr;
			iv++;
		}

		/* Optimistically poll for head pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].head;
		iv->rdata = (void *)hptr;
		iv->len = sizeof *hptr;
		iv++;

		fpga_iov(iov, iv - iov);
	}

bail:
	xSemaphoreGive(sem->lock);
	return rx;
}
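
/*
 * Illustrative reader loop (a sketch only; the task, the ring name
 * EL_RB_CONSOLE and handle_input() are hypothetical):
 *
 *	static void console_rx_task(void *arg)
 *	{
 *		char buf[128];
 *
 *		for (;;) {
 *			// Block until at least one byte has arrived, then
 *			// return whatever is immediately available
 *			size_t n = esplink_read(EL_RB_CONSOLE, buf,
 *						sizeof buf, 1, false);
 *			if (n)
 *				handle_input(buf, n);
 *			else
 *				vTaskDelay(pdMS_TO_TICKS(100)); // link down; retry later
 *		}
 *	}
 */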