  1. /*
  2. * Handle ring buffer link between ESP32 and FPGA
  3. * This implements the ESP32 (upstream/active/initiator/master) side.
  4. */
  5. #define MODULE "esplink"
  6. #include "common.h"
  7. #include "esplink.h"
  8. #include "fpga.h"
/*
 * Local shadow copies of the ring-buffer pointer blocks.
 * d = downstream block: refreshed from the FPGA via fpga_iov(), never
 *     written locally (const volatile).
 * u = upstream block: advanced locally and written back to the FPGA.
 */
struct esplink_ringbuf_ptrs {
	const volatile struct esplink_ptrs_dstr *d;
	volatile struct esplink_ptrs_ustr *u;
};
/*
 * Per-ring, per-direction lock: serializes users of one ring buffer
 * direction (write side [0] or read side [1]).
 */
struct esplink_sem {
	SemaphoreHandle_t lock;
};
/*
 * Event group indicating which ring buffers are ready for I/O
 */
EventGroupHandle_t esplink_filled; /* Data requested in "fill" ready */

static struct {
	volatile unsigned int ready;	/* 0 = link down, else generation count */
	bool shutdown;			/* NOTE(review): never set in this file — confirm use */
	SemaphoreHandle_t mutex; /* Configuration mutex */
	struct esplink_ringbuf_ptrs rb;		/* Local head/tail pointer copies */
	struct esplink_ringbuf_desc *desc;	/* Descriptors fetched from the FPGA */
	struct esplink_ringbuf_head head;	/* Link header; count clamped to EL_RB_COUNT */
	struct esplink_sem sem[EL_RB_COUNT][2];	/* [ring][0]=write lock, [ring][1]=read lock */
	size_t need[EL_RB_COUNT][2]; /* Amount of data requested for wakeup */
} elink;
/* Leave at least this much space, to preserve alignment */
#define RINGBUF_UNUSABLE 4
/* One event-group bit per ring direction: EL_RB_COUNT rings x 2 directions */
#define ELQUEUE_ALL_MASK ((1UL << (EL_RB_COUNT*2)) - 1)
/*
 * Take the link offline.  Marks the link not-ready, wakes every waiter,
 * then reclaims all per-ring locks so no reader/writer is left inside a
 * transfer.  Caller must hold elink.mutex; on return both elink.mutex
 * and every elink.sem[][].lock are held, which is the state
 * esplink_start() expects before reinitializing or shutting down.
 */
static void esplink_stop(void)
{
	elink.ready = 0;
	/* No waking up waiters that only want online wakeups */
	xEventGroupClearBits(esplink_filled, ELWAIT_ONLINE);
	/* Wake up all the internal waiters or others with online = false */
	xEventGroupSetBits(esplink_filled, ELQUEUE_ALL_MASK);
	/*
	 * Take every ring lock.  Each awakened user sees elink.ready == 0
	 * (generation mismatch), bails out and releases its lock, letting
	 * this loop complete.
	 */
	struct esplink_sem *s = &elink.sem[0][0];
	for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
		xSemaphoreTake(s->lock, portMAX_DELAY);
		s++;
	}
}
  46. /* This must be called and completed before any other esplink functions! */
  47. void esplink_init(void)
  48. {
  49. elink.mutex = null_check(xSemaphoreCreateMutex());
  50. esplink_filled = null_check(xEventGroupCreate());
  51. elink.desc = xmalloc_dma(EL_RB_COUNT * sizeof *elink.desc);
  52. elink.rb.u = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.u);
  53. elink.rb.d = xmalloc_dma(EL_RB_COUNT * sizeof *elink.rb.d);
  54. struct esplink_sem *s = &elink.sem[0][0];
  55. for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
  56. s->lock = null_check(xSemaphoreCreateBinary());
  57. s++;
  58. }
  59. xSemaphoreGive(elink.mutex);
  60. }
  61. /*
  62. * This needs to be called from the FPGA service thread once before
  63. * any esplink functions can be called from any other thread. After that,
  64. * those functions are safe to call (but may fail) regardless of
  65. * shutdown and/or reinitialization of the link.
  66. *
  67. * Call this function with head == NULL to either shut the link down
  68. * or to initialize the data structures (see above)
  69. */
  70. void esplink_start(const struct esplink_head *head)
  71. {
  72. struct fpga_iov iov[3];
  73. static unsigned int gen_count;
  74. static bool started = false;
  75. xSemaphoreTake(elink.mutex, portMAX_DELAY);
  76. if (elink.ready)
  77. esplink_stop();
  78. if (!head)
  79. goto shutdown;
  80. /* At this point elink.mutex and all the ->lock mutexes are held */
  81. elink.head = head->rb;
  82. elink.head.count = Min(head->rb.count, EL_RB_COUNT);
  83. size_t desc_size = sizeof(*elink.desc) * elink.head.count;
  84. size_t dptr_size = sizeof(*elink.rb.d) * elink.head.count;
  85. /* Set wakeup thresholds */
  86. for (size_t i = 0; i < elink.head.count; i++) {
  87. elink.need[i][0] = RINGBUF_UNUSABLE + 1;
  88. elink.need[i][1] = 1;
  89. }
  90. iov[0].cmd = FPGA_CMD_RD;
  91. iov[0].addr = elink.head.desc;
  92. iov[0].rdata = (void *)&elink.desc;
  93. iov[0].len = desc_size;
  94. iov[1].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
  95. iov[1].addr = elink.head.dstr;
  96. iov[1].rdata = (void *)elink.rb.d;
  97. iov[1].len = dptr_size;
  98. /* Write back the same pointer values -> all buffers currently empty */
  99. iov[2].cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
  100. iov[2].addr = elink.head.ustr;
  101. iov[2].wdata = (void *)elink.rb.d; /* rb.d is correct */
  102. iov[2].len = dptr_size;
  103. fpga_iov(iov, ARRAY_SIZE(iov));
  104. memcpy((void *)elink.rb.u, (void *)elink.rb.d, dptr_size);
  105. elink.ready = ++gen_count;
  106. xEventGroupClearBits(esplink_filled, ELQUEUE_ALL_MASK);
  107. struct esplink_sem *s = &elink.sem[0][0];
  108. for (size_t i = 0; i < EL_RB_COUNT*2; i++) {
  109. xSemaphoreGive(s->lock);
  110. s++;
  111. }
  112. xEventGroupSetBits(esplink_filled, ELWAIT_ONLINE);
  113. shutdown:
  114. xSemaphoreGive(elink.mutex);
  115. }
  116. /*
  117. * Called from the FPGA service thread when a ring buffer
  118. * interrupt is received
  119. */
/*
 * Called from the FPGA service thread when a ring buffer
 * interrupt is received
 */
void esplink_poll(void)
{
	/* Cheap unlocked check; rechecked under the mutex below */
	if (!elink.ready)
		return;
	xSemaphoreTake(elink.mutex, portMAX_DELAY);
	if (elink.ready) {
		const size_t count = elink.head.count;
		const size_t dptr_size = sizeof(*elink.rb.d) * count;
		struct fpga_iov iov[1];
		/* Refresh the downstream pointer block, acking the IRQ */
		iov[0].cmd = FPGA_CMD_RD | FPGA_CMD_ACK(EL_UIRQ_RINGBUF);
		iov[0].addr = elink.head.dstr;
		iov[0].rdata = (void *)elink.rb.d;
		iov[0].len = dptr_size;
		fpga_iov(iov, 1);
		/*
		 * Walk the rings and set one event bit per direction whose
		 * requested threshold (elink.need) is now met; bits are in
		 * the same order as the elink.sem[][] layout — presumably
		 * matching ELQUEUE_DL()/ELQUEUE_UL(), verify in esplink.h.
		 */
		EventBits_t wakeup = 0;
		EventBits_t tbit = 1;
		for (size_t i = 0; i < count; i++) {
			size_t need;
			/* Write side: free space = (tail - head - 1) mod size */
			need = atomic(elink.need[i][0]);
			if (((elink.rb.d[i].tail - atomic(elink.rb.u[i].head) - 1) &
			     (elink.desc[i].dstr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;
			/* Read side: available data = (head - tail) mod size */
			need = atomic(elink.need[i][1]);
			if (((elink.rb.d[i].head - atomic(elink.rb.u[i].tail)) &
			     (elink.desc[i].ustr.size-1)) >= need)
				wakeup |= tbit;
			tbit <<= 1;
		}
		xEventGroupSetBits(esplink_filled, wakeup);
	}
	xSemaphoreGive(elink.mutex);
}
  153. /* ------------------------------------------------------------------------- *
  154. * Functions that can be called from a non-service thread.
  155. * ------------------------------------------------------------------------- */
/*
 * Write/read data to/from a ring buffer. Block if necessary until at least
 * <mintx>/<minrx> bytes have been sent/received, otherwise return.
 *
 * Returns the number of bytes sent/received.
 *
 * If <atomic> is set, only advance the head/tail pointer after the
 * whole transaction has been performed (it must be no larger than half
 * the ring buffer size.) If <mintx>/<minrx> < <len> and the return
 * value is less than <len>, then the pointer will NOT have been advanced
 * and the transaction was aborted. A new call will restart from the
 * previous pointer location.
 *
 * The wakeup "need" value in esplink_wait_for() is set appropriately
 * for a transaction of the same size, depending on whether the <atomic>
 * flag was set or not. Thus, for an atomic transaction, if a shorter
 * atomic or non-atomic transaction is then desired, it may be
 * necessary to issue it without waiting for esplink_wait_for().
 */
/*
 * Write <len> bytes from <data> to the downstream side of ring <ring>.
 * See the contract comment above for blocking/<atomic> semantics.
 * Returns the number of bytes actually queued to the FPGA.
 */
size_t esplink_write(enum esplink_ringbuf_user ring, const void *data,
		     size_t len, size_t mintx, bool atomic)
{
	const size_t unusable = RINGBUF_UNUSABLE;
	size_t tx = 0;
	/* Unlocked early-outs; elink.ready is rechecked under the lock */
	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return tx;
	mintx = Min(mintx, len);
	const char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][0];	/* [0] = write side */
	xSemaphoreTake(sem->lock, portMAX_DELAY);
	/* Snapshot the link generation; esplink_stop() zeroes elink.ready */
	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;
	/*
	 * NOTE(review): this dereferences the descriptor via elink.head.desc,
	 * whereas esplink_poll() uses the copy in elink.desc[] fetched by
	 * fpga_iov() — confirm head.desc is directly addressable from here.
	 */
	const struct esplink_ringbuf * const desc = &elink.head.desc[ring].dstr;
	const size_t size = desc->size;	/* mask arithmetic below requires a power of two */
	if (unlikely(atomic && len > (size >> 1)))
		goto bail;
	size_t * const hptr = (size_t *)&elink.rb.u[ring].head;	/* ours (producer) */
	const volatile size_t * const tptr = &elink.rb.d[ring].tail;	/* FPGA's (consumer) */
	size_t head = *hptr;
	/* An atomic transfer needs room for everything at once */
	const size_t need = (atomic ? len : 1) + unusable;
	/* atomic() here is an accessor macro — distinct from the bool <atomic> */
	atomic(elink.need[ring][0]) = need; /* Minimum wakeup */
	char * const start = desc->start;
	while (elink.ready == ready_gen) {
		/* Clear before sampling tail so a racing poll re-sets the bit */
		xEventGroupClearBits(esplink_filled, ELQUEUE_DL(ring));
		const size_t tail = *tptr;
		size_t space = (tail-head) & (size-1);
		if (!len) {
			/* All data sent; re-publish writability if space remains */
			if (space >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_DL(ring));
			break;
		}
		if (space < need) {
			if (tx >= mintx)
				break;
			esplink_wait_for(ELQUEUE_DL(ring), false);
			continue;
		}
		/* Send as much as fits, split at the ring wrap point */
		size_t chunk = Min(space - unusable, len);
		struct fpga_iov iov[4], *iv = iov;
		while (chunk) {
			iv->cmd = FPGA_CMD_WR;
			iv->addr = start + head;
			iv->wdata = p;
			iv->len = Min(chunk, size - head);	/* up to the wrap */
			p += iv->len;
			tx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			head = (head+iv->len) & (size-1);
			iv++;
		}
		if (!len || !atomic) {
			/* Commit the data to the ring buffer */
			elink.rb.u[ring].head = head;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			/*
			 * NOTE(review): commits through head.dstr[], yet
			 * esplink_start() initializes the upstream pointers at
			 * head.ustr — confirm this is the intended address block.
			 */
			iv->addr = &elink.head.dstr[ring].head;
			iv->wdata = hptr;
			iv->len = sizeof *hptr;
			iv++;
		}
		/* Optimistically poll for tail pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].tail;
		iv->rdata = (void *)tptr;
		iv->len = sizeof *tptr;
		iv++;
		fpga_iov(iov, iv - iov);
	}
bail:
	xSemaphoreGive(sem->lock);
	return tx;
}
/*
 * Read <len> bytes from the upstream side of ring <ring> into <data>.
 * See the contract comment above esplink_write() for blocking/<atomic>
 * semantics.  Returns the number of bytes actually received.
 */
size_t esplink_read(enum esplink_ringbuf_user ring, void *data,
		    size_t len, size_t minrx, bool atomic)
{
	size_t rx = 0;
	/* Unlocked early-outs; elink.ready is rechecked under the lock */
	if (!len || ring >= EL_RB_COUNT || !elink.ready)
		return rx;
	minrx = Min(minrx, len);
	char *p = data;
	struct esplink_sem *sem = &elink.sem[ring][1];	/* [1] = read side */
	xSemaphoreTake(sem->lock, portMAX_DELAY);
	/* Snapshot the link generation; esplink_stop() zeroes elink.ready */
	const unsigned int ready_gen = elink.ready;
	if (unlikely(!ready_gen || ring >= elink.head.count))
		goto bail;
	/*
	 * NOTE(review): dereferences the descriptor via elink.head.desc,
	 * whereas esplink_poll() uses the fetched copy elink.desc[] —
	 * confirm head.desc is directly addressable from here.
	 */
	const struct esplink_ringbuf * const desc = &elink.head.desc[ring].ustr;
	const size_t size = desc->size;	/* mask arithmetic below requires a power of two */
	if (unlikely(atomic && len > (size >> 1)))
		goto bail;
	size_t * const tptr = (size_t *)&elink.rb.u[ring].tail;	/* ours (consumer) */
	const volatile size_t * const hptr = &elink.rb.d[ring].head;	/* FPGA's (producer) */
	size_t tail = *tptr;
	/* An atomic transfer must arrive in one piece */
	const size_t need = atomic ? len : 1;
	/* atomic() here is an accessor macro — distinct from the bool <atomic> */
	atomic(elink.need[ring][1]) = need; /* Minimum wakeup */
	char * const start = desc->start;
	while (elink.ready == ready_gen) {
		/* Clear before sampling head so a racing poll re-sets the bit */
		xEventGroupClearBits(esplink_filled, ELQUEUE_UL(ring));
		const size_t head = *hptr;
		size_t avail = (head-tail) & (size-1);
		if (!len) {
			/* Done; re-publish readability if more data already waits */
			if (avail >= need)
				xEventGroupSetBits(esplink_filled, ELQUEUE_UL(ring));
			break;
		}
		if (avail < need) {
			if (rx >= minrx)
				break;
			esplink_wait_for(ELQUEUE_UL(ring), false);
			continue;
		}
		/* Fetch as much as is available, split at the ring wrap point */
		size_t chunk = Min(avail, len);
		struct fpga_iov iov[4], *iv = iov;
		while (chunk) {
			iv->cmd = FPGA_CMD_RD;
			iv->addr = start + tail;
			iv->rdata = p;
			iv->len = Min(chunk, size - tail);	/* up to the wrap */
			p += iv->len;
			rx += iv->len;
			len -= iv->len;
			chunk -= iv->len;
			tail = (tail+iv->len) & (size-1);
			iv++;
		}
		if (!len || !atomic) {
			/* Consume the data from the ring buffer */
			elink.rb.u[ring].tail = tail;
			iv->cmd = FPGA_CMD_WR | FPGA_CMD_IRQ(EL_DIRQ_RINGBUF);
			/*
			 * NOTE(review): commits through head.dstr[], yet
			 * esplink_start() initializes the upstream pointers at
			 * head.ustr — confirm this is the intended address block.
			 */
			iv->addr = &elink.head.dstr[ring].tail;
			iv->wdata = tptr;
			iv->len = sizeof *tptr;
			iv++;
		}
		/* Optimistically poll for head pointer advance */
		iv->cmd = FPGA_CMD_RD;
		iv->addr = &elink.head.dstr[ring].head;
		iv->rdata = (void *)hptr;
		iv->len = sizeof *hptr;
		iv++;
		fpga_iov(iov, iv - iov);
	}
bail:
	xSemaphoreGive(sem->lock);
	return rx;
}