esplink.c

/*
 * Handle ring buffer link between ESP32 and FPGA
 * This implements the ESP32 (upstream/active/initiator/master) side.
 */
#define MODULE "esplink"
#include "common.h"
#include "esplink.h"
#include "fpga.h"
#include "boardinfo_esp.h"
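
/*
 * Local copies of the ring buffer head/tail words: .d mirrors the pointer
 * values last read back from the FPGA (the downstream side), while .u
 * holds the values this (upstream) side advances and pushes back to the
 * FPGA.
 */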
struct esplink_ringbuf_ptrs {
	const volatile struct esplink_ptrs_dstr *d;
	volatile struct esplink_ptrs_ustr *u;
};

struct esplink_sem {
	SemaphoreHandle_t lock;
};

/*
 * Event group indicating which ring buffers are ready for I/O
 */
EventGroupHandle_t esplink_filled;	/* Data requested in "fill" ready */

static struct {
	volatile unsigned int ready;
	bool shutdown;
	SemaphoreHandle_t mutex;	/* Configuration mutex */
	struct esplink_ringbuf_ptrs rb;
	struct esplink_ringbuf_desc *desc;
	struct esplink_ringbuf_head head;
	struct esplink_sem sem[EL_RB_COUNT][2];
	size_t need[EL_RB_COUNT][2];	/* Amount of data requested for wakeup */
} elink;
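
/*
 * elink.ready doubles as a link "generation" number: it is 0 while the
 * link is down and is bumped by every successful esplink_start(), so a
 * thread blocked in esplink_read()/esplink_write() can notice that the
 * link was torn down or restarted underneath it.
 */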

/* Leave at least this much space, to preserve alignment */
#define RINGBUF_UNUSABLE	4

#define ELQUEUE_ALL_MASK	((1UL << (EL_RB_COUNT*2)) - 1)
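
/*
 * The event group carries two bits per ring buffer: ELQUEUE_DL(ring)
 * signals free space in the downstream (write) ring and ELQUEUE_UL(ring)
 * signals data available in the upstream (read) ring, as computed in
 * esplink_poll() below.
 */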

static void esplink_stop(void)
{
	elink.ready = 0;

	/* Don't wake up waiters that only want online wakeups */
	xEventGroupClearBits(esplink_filled, ELWAIT_ONLINE);

	/* Wake up all the internal waiters and any others with online = false */
	xEventGroupSetBits(esplink_filled, ELQUEUE_ALL_MASK);
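
	/*
	 * Take every per-ring lock so that any thread still inside
	 * esplink_read()/esplink_write() has drained out before the link
	 * is torn down; the locks are handed back by the next
	 * esplink_start().
	 */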
	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		xSemaphoreTake(s->lock, portMAX_DELAY);
		s++;
	}
}

/* This must be called and completed before any other esplink functions! */
void esplink_init(void)
{
	elink.mutex = null_check(xSemaphoreCreateMutex());
	esplink_filled = null_check(xEventGroupCreate());

	elink.desc = xmalloc_dma(EL_RB_COUNT * sizeof *elink.desc);
	elink.rb.u = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.u);
	elink.rb.d = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.d);

	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		s->lock = null_check(xSemaphoreCreateBinary());
		s++;
	}

	xSemaphoreGive(elink.mutex);
}

/*
 * This needs to be called from the FPGA service thread once before
 * any esplink functions can be called from any other thread. After that,
 * those functions are safe to call (but may fail) regardless of
 * shutdown and/or reinitialization of the link.
 *
 * Call this function with head == NULL to either shut the link down
 * or to initialize the data structures (see above).
 */
void esplink_start(const struct esplink_head *head)
{
	struct fpga_iov iov[4];
	static unsigned int gen_count;

	xSemaphoreTake(elink.mutex, portMAX_DELAY);
	if (elink.ready)
		esplink_stop();

	if (!head)
		goto shutdown;

	/* At this point elink.mutex and all the ->lock mutexes are held */
	elink.head = head->rb;
	elink.head.count = Min(head->rb.count, EL_RB_COUNT);

	size_t desc_size = sizeof(*elink.desc) * elink.head.count;
	size_t dptr_size = sizeof(*elink.rb.d) * elink.head.count;

	/* Set wakeup thresholds */
	for (size_t i = 0; i < elink.head.count; i++) {
		elink.need[i][0] = RINGBUF_UNUSABLE + 1;
		elink.need[i][1] = 1;
	}

	iov[0].cmd = FPGA_CMD_RD;
	iov[0].addr = elink.head.desc;
	iov[0].rdata = (void *)elink.desc;
	iov[0].len = desc_size;

	iov[1].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
	iov[1].addr = elink.head.dstr;
	iov[1].rdata = (void *)elink.rb.d;
	iov[1].len = dptr_size;

	/* Write back the same pointer values -> all buffers currently empty */
	iov[2].cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
	iov[2].addr = elink.head.ustr;
	iov[2].wdata = (void *)elink.rb.d;	/* rb.d is correct */
	iov[2].len = dptr_size;

	/* Update board_info on the FPGA */
	iov[3].cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_BOARDINFO);
	iov[3].addr = head->board_info;
	iov[3].wdata = &board_info;
	iov[3].len = sizeof board_info;

	fpga_iov(iov, ARRAY_SIZE(iov));
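
	/*
	 * Mirror the snapshot we just read into our own (upstream) copies
	 * so that both sides start out agreeing the rings are empty.
	 */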
	memcpy((void *)elink.rb.u, (void *)elink.rb.d, dptr_size);

	elink.ready = ++gen_count;
	xEventGroupClearBits(esplink_filled, ELQUEUE_ALL_MASK);

	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		xSemaphoreGive(s->lock);
		s++;
	}
	xEventGroupSetBits(esplink_filled, ELWAIT_ONLINE);

shutdown:
	xSemaphoreGive(elink.mutex);
}

/*
 * Called from the FPGA service thread when a ring buffer
 * interrupt is received
 */
void esplink_poll(void)
{
	if (!elink.ready)
		return;

	xSemaphoreTake(elink.mutex, portMAX_DELAY);
	if (elink.ready) {
		const size_t count = elink.head.count;
		const size_t dptr_size = sizeof(*elink.rb.d) * count;
		struct fpga_iov iov[1];

		iov[0].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
		iov[0].addr = elink.head.dstr;
		iov[0].rdata = (void *)elink.rb.d;
		iov[0].len = dptr_size;
		fpga_iov(iov, 1);

		EventBits_t wakeup = 0;
		EventBits_t tbit = 1;
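
		/*
		 * For each ring, raise the write-side bit when the downstream
		 * ring has at least need[i][0] bytes of free space, and the
		 * read-side bit when the upstream ring holds at least
		 * need[i][1] bytes. The ring sizes are powers of two, so the
		 * masked subtraction gives the distance modulo the ring size.
		 */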
		for (size_t i = 0; i < count; i++) {
			size_t need;

			need = atomic(elink.need[i][0]);
			if (((elink.rb.d[i].tail - atomic(elink.rb.u[i].head) - 1) &
			     (elink.desc[i].dstr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;

			need = atomic(elink.need[i][1]);
			if (((elink.rb.d[i].head - atomic(elink.rb.u[i].tail)) &
			     (elink.desc[i].ustr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;
		}
		xEventGroupSetBits(esplink_filled, wakeup);
	}
	xSemaphoreGive(elink.mutex);
}

/* ------------------------------------------------------------------------- *
 * Functions that can be called from a non-service thread.
 * ------------------------------------------------------------------------- */

/*
 * Write/read data to/from a ring buffer. Block if necessary until at least
 * <mintx>/<minrx> bytes have been sent/received, otherwise return.
 *
 * Returns the number of bytes sent/received.
 *
 * If <atomic> is set, only advance the head/tail pointer after the
 * whole transaction has been performed (it must be no larger than half
 * the ring buffer size.) If <mintx>/<minrx> < <len> and the return
 * value is less than <len>, then the pointer will NOT have been advanced
 * and the transaction was aborted. A new call will restart from the
 * previous pointer location.
 *
 * The wakeup "need" value in esplink_wait_for() is set appropriately
 * for a transaction of the same size, depending on whether the <atomic>
 * flag was set or not. Thus, for an atomic transaction, if a shorter
 * atomic or non-atomic transaction is then desired, it may be
 * necessary to issue it without waiting for esplink_wait_for().
 */
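
/*
 * Typical usage (EL_RB_EXAMPLE is purely illustrative here; the real
 * ring enumerators come from esplink.h):
 *
 *	send a complete message, blocking until it can be committed as one unit:
 *		esplink_write(EL_RB_EXAMPLE, msg, len, len, true);
 *
 *	grab whatever input is already available, without blocking:
 *		n = esplink_read(EL_RB_EXAMPLE, buf, sizeof buf, 0, false);
 */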

size_t esplink_write(enum esplink_ringbuf_user ring, const void *data,
		     size_t len, size_t mintx, bool atomic)
{
	const size_t unusable = RINGBUF_UNUSABLE;
	size_t tx = 0;

	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return tx;

	mintx = Min(mintx, len);
	const char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][0];

	xSemaphoreTake(sem->lock, portMAX_DELAY);

	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;

	const struct esplink_ringbuf * const desc = &elink.head.desc[ring].dstr;
	const size_t size = desc->size;
	if (unlikely(atomic && len > (size >> 1)))
		goto bail;

	size_t * const hptr = (size_t *)&elink.rb.u[ring].head;
	const volatile size_t * const tptr = &elink.rb.d[ring].tail;
	size_t head = *hptr;
	const size_t need = (atomic ? len : 1) + unusable;

	atomic(elink.need[ring][0]) = need;	/* Minimum wakeup */

	char * const start = desc->start;
	while (elink.ready == ready_gen) {
		xEventGroupClearBits(esplink_filled, ELQUEUE_DL(ring));
		const size_t tail = *tptr;
		size_t space = (tail-head) & (size-1);
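
		/*
		 * Nothing left to send: re-assert the event bit if there is
		 * already room for another transaction of the same size,
		 * then stop.
		 */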
		if (!len) {
			if (space >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_DL(ring));
			break;
		}
		if (space < need) {
			if (tx >= mintx)
				break;
			esplink_wait_for(ELQUEUE_DL(ring), false);
			continue;
		}

		size_t chunk = Min(space - unusable, len);
		struct fpga_iov iov[4], *iv = iov;
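
		/*
		 * Queue the FPGA writes for this chunk; it may wrap around
		 * the end of the ring buffer, in which case it is split
		 * into two segments.
		 */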
		while (chunk) {
			iv->cmd = FPGA_CMD_WR;
			iv->addr = start + head;
			iv->wdata = p;
			iv->len = Min(chunk, size - head);
			p += iv->len;
			tx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			head = (head+iv->len) & (size-1);
			iv++;
		}

		if (!len || !atomic) {
			/* Commit the data to the ring buffer */
			elink.rb.u[ring].head = head;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			iv->addr = &elink.head.dstr[ring].head;
			iv->wdata = hptr;
			iv->len = sizeof *hptr;
			iv++;
		}

		/* Optimistically poll for tail pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].tail;
		iv->rdata = (void *)tptr;
		iv->len = sizeof *tptr;
		iv++;

		fpga_iov(iov, iv - iov);
	}

bail:
	xSemaphoreGive(sem->lock);
	return tx;
}

size_t esplink_read(enum esplink_ringbuf_user ring, void *data,
		    size_t len, size_t minrx, bool atomic)
{
	size_t rx = 0;

	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return rx;

	minrx = Min(minrx, len);
	char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][1];

	xSemaphoreTake(sem->lock, portMAX_DELAY);

	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;

	const struct esplink_ringbuf * const desc = &elink.head.desc[ring].ustr;
	const size_t size = desc->size;
	if (unlikely(atomic && len > (size >> 1)))
		goto bail;

	size_t * const tptr = (size_t *)&elink.rb.u[ring].tail;
	const volatile size_t * const hptr = &elink.rb.d[ring].head;
	size_t tail = *tptr;
	const size_t need = atomic ? len : 1;

	atomic(elink.need[ring][1]) = need;	/* Minimum wakeup */

	char * const start = desc->start;
	while (elink.ready == ready_gen) {
		xEventGroupClearBits(esplink_filled, ELQUEUE_UL(ring));
		const size_t head = *hptr;
		size_t avail = (head-tail) & (size-1);
		if (!len) {
			if (avail >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_UL(ring));
			break;
		}
		if (avail < need) {
			if (rx >= minrx)
				break;
			esplink_wait_for(ELQUEUE_UL(ring), false);
			continue;
		}
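
		/*
		 * Unlike the write side, the reader may consume everything
		 * that is currently available; only the producer has to
		 * leave RINGBUF_UNUSABLE bytes of slack in the ring.
		 */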
		size_t chunk = Min(avail, len);
		struct fpga_iov iov[4], *iv = iov;

		while (chunk) {
			iv->cmd = FPGA_CMD_RD;
			iv->addr = start + tail;
			iv->rdata = p;
			iv->len = Min(chunk, size - tail);
			p += iv->len;
			rx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			tail = (tail+iv->len) & (size-1);
			iv++;
		}

		if (!len || !atomic) {
			/* Consume the data from the ring buffer */
			elink.rb.u[ring].tail = tail;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			iv->addr = &elink.head.dstr[ring].tail;
			iv->wdata = tptr;
			iv->len = sizeof *tptr;
			iv++;
		}

		/* Optimistically poll for head pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].head;
		iv->rdata = (void *)hptr;
		iv->len = sizeof *hptr;
		iv++;

		fpga_iov(iov, iv - iov);
	}

bail:
	xSemaphoreGive(sem->lock);
	return rx;
}