FEMU (upstream master, commit 7e238cc)
FEMU: Accurate, Scalable and Extensible NVMe SSD Emulator (FAST'18)
rte_ring.h
Go to the documentation of this file.
1 /*-
2  * BSD LICENSE
3  *
4  * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  * notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in
15  * the documentation and/or other materials provided with the
16  * distribution.
17  * * Neither the name of Intel Corporation nor the names of its
18  * contributors may be used to endorse or promote products derived
19  * from this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Derived from FreeBSD's bufring.h
36  *
37  **************************************************************************
38  *
39  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions are met:
44  *
45  * 1. Redistributions of source code must retain the above copyright notice,
46  * this list of conditions and the following disclaimer.
47  *
48  * 2. The name of Kip Macy nor the names of other
49  * contributors may be used to endorse or promote products derived from
50  * this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
53  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
56  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62  * POSSIBILITY OF SUCH DAMAGE.
63  *
64  ***************************************************************************/
65 
66 #ifndef _RTE_RING_H_
67 #define _RTE_RING_H_
68 
90 #ifdef __cplusplus
91 extern "C" {
92 #endif
93 
94 #include <stdio.h>
95 #include <stdint.h>
96 #include <sys/queue.h>
97 #include <errno.h>
98 #include <xmmintrin.h>
99 #include "rte_atomic_x86.h"
100 #include "rte_branch_prediction.h"
101 #define __rte_always_inline inline
102 
103 
104 #define RTE_RING_MZ_PREFIX "RG_"
105 
106 enum rte_ring_queue_behavior {
107  RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
108  RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
109 };
110 /* structure to hold a pair of head/tail values and other metadata */
111 struct rte_ring_headtail {
112  volatile uint32_t head;
113  volatile uint32_t tail;
114  uint32_t single;
115 };
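/* 'single' is fixed at ring-creation time and records whether this end has a
 * single producer (resp. consumer); the untyped rte_ring_enqueue/dequeue
 * wrappers below pass it as the is_sp/is_sc argument to select between the
 * CAS-based multi path and the cheaper single-threaded path. */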
116 #define RTE_NAMESIZE 256
117 
127 struct rte_ring {
128  /*
129  * Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
130  * compatibility requirements, it could be changed to RTE_RING_NAMESIZE
131  * next time the ABI changes
132  */
133  char name[RTE_NAMESIZE];
134  int flags;
136  uint32_t size;
137  uint32_t mask;
138  uint32_t capacity;
141  struct rte_ring_headtail prod;
142 
144  struct rte_ring_headtail cons;
145 };
146 
147 #define RING_F_SP_ENQ 0x0001
148 #define RING_F_SC_DEQ 0x0002
157 #define RING_F_EXACT_SZ 0x0004
158 #define RTE_RING_SZ_MASK (unsigned)(0x0fffffff)
160 /* @internal defines for passing to the enqueue/dequeue worker functions */
161 #define __IS_SP 1
162 #define __IS_MP 0
163 #define __IS_SC 1
164 #define __IS_MC 0
165 
166 static inline uint32_t
167 rte_align32pow2(uint32_t x)
168 {
169  x--;
170  x |= x >> 1;
171  x |= x >> 2;
172  x |= x >> 4;
173  x |= x >> 8;
174  x |= x >> 16;
175 
176  return x + 1;
177 }
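/* Rounds x up to the next power of two by smearing its highest set bit into
 * every lower bit, e.g. rte_align32pow2(1000) == 1024; the initial x-- keeps
 * exact powers of two unchanged. A power-of-two ring size lets "head & mask"
 * replace a modulo when computing slot indices. */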
178 
193 ssize_t rte_ring_get_memsize(unsigned count);
194 
229 int rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
230  unsigned flags);
231 
271 struct rte_ring *rte_ring_create(const char *name, unsigned count, unsigned flags);
278 void rte_ring_free(struct rte_ring *r);
279 
288 void rte_ring_dump(FILE *f, const struct rte_ring *r);
289 
290 /* the actual enqueue of pointers on the ring.
291  * Placed here since identical code needed in both
292  * single and multi producer enqueue functions */
293 #define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
294  unsigned int i; \
295  const uint32_t size = (r)->size; \
296  uint32_t idx = prod_head & (r)->mask; \
297  obj_type *ring = (obj_type *)ring_start; \
298  if (idx + n < size) { \
299  for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
300  ring[idx] = obj_table[i]; \
301  ring[idx+1] = obj_table[i+1]; \
302  ring[idx+2] = obj_table[i+2]; \
303  ring[idx+3] = obj_table[i+3]; \
304  } \
305  switch (n & 0x3) { \
306  case 3: \
307  ring[idx++] = obj_table[i++]; /* fallthrough */ \
308  case 2: \
309  ring[idx++] = obj_table[i++]; /* fallthrough */ \
310  case 1: \
311  ring[idx++] = obj_table[i++]; \
312  } \
313  } else { \
314  for (i = 0; idx < size; i++, idx++)\
315  ring[idx] = obj_table[i]; \
316  for (idx = 0; i < n; i++, idx++) \
317  ring[idx] = obj_table[i]; \
318  } \
319 } while (0)
320 
321 /* the actual copy of pointers on the ring to obj_table.
322  * Placed here since identical code needed in both
323  * single and multi consumer dequeue functions */
324 #define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
325  unsigned int i; \
326  uint32_t idx = cons_head & (r)->mask; \
327  const uint32_t size = (r)->size; \
328  obj_type *ring = (obj_type *)ring_start; \
329  if (idx + n < size) { \
330  for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
331  obj_table[i] = ring[idx]; \
332  obj_table[i+1] = ring[idx+1]; \
333  obj_table[i+2] = ring[idx+2]; \
334  obj_table[i+3] = ring[idx+3]; \
335  } \
336  switch (n & 0x3) { \
337  case 3: \
338  obj_table[i++] = ring[idx++]; /* fallthrough */ \
339  case 2: \
340  obj_table[i++] = ring[idx++]; /* fallthrough */ \
341  case 1: \
342  obj_table[i++] = ring[idx++]; \
343  } \
344  } else { \
345  for (i = 0; idx < size; i++, idx++) \
346  obj_table[i] = ring[idx]; \
347  for (idx = 0; i < n; i++, idx++) \
348  obj_table[i] = ring[idx]; \
349  } \
350 } while (0)
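/* Both macros copy four pointers per iteration, with a Duff's-device-style
 * switch mopping up the 0-3 remainder; the else branch handles a copy that
 * wraps past the end of the ring array by splitting it into two runs. Since
 * the fast-path test is "idx + n < size", a copy ending exactly on the array
 * boundary also takes the split path -- slower, but still correct. */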
351 
352 static __rte_always_inline void
353 update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
354  uint32_t single)
355 {
356  /*
357  * If there are other enqueues/dequeues in progress that preceded us,
358  * we need to wait for them to complete
359  */
360  if (!single)
361  while (unlikely(ht->tail != old_val))
362  _mm_pause();
363 
364  ht->tail = new_val;
365 }
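/* With multiple producers (or consumers), head reservations can be made out
 * of order; each thread therefore spins until the shared tail catches up to
 * its own old head value, so tails advance strictly in reservation order and
 * a consumer never sees a slot that an earlier producer is still filling. */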
366 
390 static __rte_always_inline unsigned int
391 __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
392  unsigned int n, enum rte_ring_queue_behavior behavior,
393  uint32_t *old_head, uint32_t *new_head,
394  uint32_t *free_entries)
395 {
396  const uint32_t capacity = r->capacity;
397  unsigned int max = n;
398  int success;
399 
400  do {
401  /* Reset n to the initial burst count */
402  n = max;
403 
404  *old_head = r->prod.head;
405  const uint32_t cons_tail = r->cons.tail;
406  /*
407  * The subtraction is done between two unsigned 32bits value
408  * (the result is always modulo 32 bits even if we have
409  * *old_head > cons_tail). So 'free_entries' is always between 0
410  * and capacity (which is < size).
411  */
412  *free_entries = (capacity + cons_tail - *old_head);
413 
414  /* check that we have enough room in ring */
415  if (unlikely(n > *free_entries))
416  n = (behavior == RTE_RING_QUEUE_FIXED) ?
417  0 : *free_entries;
418 
419  if (n == 0)
420  return 0;
421 
422  *new_head = *old_head + n;
423  if (is_sp)
424  r->prod.head = *new_head, success = 1;
425  else
426  success = rte_atomic32_cmpset(&r->prod.head,
427  *old_head, *new_head);
428  } while (unlikely(success == 0));
429  return n;
430 }
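/* Worked example of the free-space math: with capacity = 7, cons.tail =
 * 0xFFFFFFFE and prod.head = 2 (both 32-bit counters have wrapped),
 * 7 + 0xFFFFFFFE - 2 == 3 (mod 2^32): four slots hold in-flight entries and
 * three are free, so unsigned wraparound yields the right distance. */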
431 
452 static __rte_always_inline unsigned int
453 __rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
454  unsigned int n, enum rte_ring_queue_behavior behavior,
455  int is_sp, unsigned int *free_space)
456 {
457  uint32_t prod_head, prod_next;
458  uint32_t free_entries;
459 
460  n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
461  &prod_head, &prod_next, &free_entries);
462  if (n == 0)
463  goto end;
464 
465  ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
466  rte_smp_wmb();
467 
468  update_tail(&r->prod, prod_head, prod_next, is_sp);
469 end:
470  if (free_space != NULL)
471  *free_space = free_entries - n;
472  return n;
473 }
474 
498 static __rte_always_inline unsigned int
499 __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
500  unsigned int n, enum rte_ring_queue_behavior behavior,
501  uint32_t *old_head, uint32_t *new_head,
502  uint32_t *entries)
503 {
504  unsigned int max = n;
505  int success;
506 
507  /* move cons.head atomically */
508  do {
509  /* Restore n as it may change every loop */
510  n = max;
511 
512  *old_head = r->cons.head;
513  const uint32_t prod_tail = r->prod.tail;
514  /* The subtraction is done between two unsigned 32bits value
515  * (the result is always modulo 32 bits even if we have
516  * cons_head > prod_tail). So 'entries' is always between 0
517  * and size(ring)-1. */
518  *entries = (prod_tail - *old_head);
519 
520  /* Set the actual entries for dequeue */
521  if (n > *entries)
522  n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
523 
524  if (n == 0)
525  return 0;
526 
527  *new_head = *old_head + n;
528  if (is_sc)
529  r->cons.head = *new_head, success = 1;
530  else
531  success = rte_atomic32_cmpset(&r->cons.head, *old_head,
532  *new_head);
533  } while (success == 0);
534  return n;
535 }
536 
557 static __rte_always_inline unsigned int
558 __rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
559  unsigned int n, enum rte_ring_queue_behavior behavior,
560  int is_sc, unsigned int *available)
561 {
562  uint32_t cons_head, cons_next;
563  uint32_t entries;
564 
565  n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
566  &cons_head, &cons_next, &entries);
567  if (n == 0)
568  goto end;
569 
570  DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
571  rte_smp_rmb();
572 
573  update_tail(&r->cons, cons_head, cons_next, is_sc);
574 
575 end:
576  if (available != NULL)
577  *available = entries - n;
578  return n;
579 }
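/* The rte_smp_rmb() above pairs with the rte_smp_wmb() in
 * __rte_ring_do_enqueue: a producer makes its slot writes visible before
 * publishing prod.tail, and a consumer finishes its slot reads before
 * advancing cons.tail, so a slot is never read before it is filled nor
 * reused before it is drained. */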
580 
599 static __rte_always_inline unsigned int
600 rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
601  unsigned int n, unsigned int *free_space)
602 {
603  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
604  __IS_MP, free_space);
605 }
606 
622 static __rte_always_inline unsigned int
623 rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
624  unsigned int n, unsigned int *free_space)
625 {
626  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
627  __IS_SP, free_space);
628 }
629 
649 static __rte_always_inline unsigned int
650 rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
651  unsigned int n, unsigned int *free_space)
652 {
653  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
654  r->prod.single, free_space);
655 }
656 
671 static __rte_always_inline int
672 rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
673 {
674  return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
675 }
676 
688 static __rte_always_inline int
689 rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
690 {
691  return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
692 }
693 
709 static __rte_always_inline int
710 rte_ring_enqueue(struct rte_ring *r, void *obj)
711 {
712  return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
713 }
714 
733 static __rte_always_inline unsigned int
734 rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
735  unsigned int n, unsigned int *available)
736 {
737  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
738  __IS_MC, available);
739 }
740 
757 static __rte_always_inline unsigned int
758 rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
759  unsigned int n, unsigned int *available)
760 {
761  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
762  __IS_SC, available);
763 }
764 
784 static __rte_always_inline unsigned int
785 rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
786  unsigned int *available)
787 {
788  return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
789  r->cons.single, available);
790 }
791 
807 static __rte_always_inline int
808 rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
809 {
810  return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
811 }
812 
825 static __rte_always_inline int
826 rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
827 {
828  return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
829 }
830 
847 static __rte_always_inline int
848 rte_ring_dequeue(struct rte_ring *r, void **obj_p)
849 {
850  return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
851 }
852 
861 static inline unsigned
862 rte_ring_count(const struct rte_ring *r)
863 {
864  uint32_t prod_tail = r->prod.tail;
865  uint32_t cons_tail = r->cons.tail;
866  uint32_t count = (prod_tail - cons_tail) & r->mask;
867  return (count > r->capacity) ? r->capacity : count;
868 }
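/* The two tail snapshots are not taken atomically, and (presumably to cover
 * RING_F_EXACT_SZ rings, whose capacity is below mask + 1) the masked
 * difference can transiently exceed capacity, hence the final clamp. */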
869 
878 static inline unsigned
879 rte_ring_free_count(const struct rte_ring *r)
880 {
881  return r->capacity - rte_ring_count(r);
882 }
883 
893 static inline int
894 rte_ring_full(const struct rte_ring *r)
895 {
896  return rte_ring_free_count(r) == 0;
897 }
898 
908 static inline int
909 rte_ring_empty(const struct rte_ring *r)
910 {
911  return rte_ring_count(r) == 0;
912 }
913 
924 static inline unsigned int
925 rte_ring_get_size(const struct rte_ring *r)
926 {
927  return r->size;
928 }
929 
938 static inline unsigned int
939 rte_ring_get_capacity(const struct rte_ring *r)
940 {
941  return r->capacity;
942 }
943 
950 void rte_ring_list_dump(FILE *f);
951 
962 struct rte_ring *rte_ring_lookup(const char *name);
963 
982 static __rte_always_inline unsigned
983 rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
984  unsigned int n, unsigned int *free_space)
985 {
986  return __rte_ring_do_enqueue(r, obj_table, n,
987  RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
988 }
989 
1005 static __rte_always_inline unsigned
1006 rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1007  unsigned int n, unsigned int *free_space)
1008 {
1009  return __rte_ring_do_enqueue(r, obj_table, n,
1010  RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
1011 }
1012 
1032 static __rte_always_inline unsigned
1033 rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
1034  unsigned int n, unsigned int *free_space)
1035 {
1036  return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
1037  r->prod.single, free_space);
1038 }
1039 
1060 static __rte_always_inline unsigned
1061 rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
1062  unsigned int n, unsigned int *available)
1063 {
1064  return __rte_ring_do_dequeue(r, obj_table, n,
1065  RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
1066 }
1067 
1085 static __rte_always_inline unsigned
1086 rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
1087  unsigned int n, unsigned int *available)
1088 {
1089  return __rte_ring_do_dequeue(r, obj_table, n,
1090  RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
1091 }
1092 
1112 static __rte_always_inline unsigned
1113 rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
1114  unsigned int n, unsigned int *available)
1115 {
1116  return __rte_ring_do_dequeue(r, obj_table, n,
1117  RTE_RING_QUEUE_VARIABLE,
1118  r->cons.single, available);
1119 }
1120 
1121 enum femu_ring_type {
1122  FEMU_RING_TYPE_SP_SC, /* Single-producer, single-consumer */
1123  FEMU_RING_TYPE_MP_SC, /* Multi-producer, single-consumer */
1124  FEMU_RING_TYPE_MP_MC, /* Multi-producer, multi-consumer */
1125 };
1126 
1137 struct rte_ring *femu_ring_create(enum femu_ring_type type, size_t count);
1138 
1144 void femu_ring_free(struct rte_ring *ring);
1145 
1153 size_t femu_ring_count(struct rte_ring *ring);
1154 
1164 size_t femu_ring_enqueue(struct rte_ring *ring, void **objs, size_t count);
1165 
1175 size_t femu_ring_dequeue(struct rte_ring *ring, void **objs, size_t count);
1176 
1177 
1178 
1179 #ifdef __cplusplus
1180 }
1181 #endif
1182 
1183 #endif /* _RTE_RING_H_ */
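
The femu_ring_* wrappers above are the interface FEMU's own code uses on top of this ring. A minimal, self-contained usage sketch follows -- assuming, based on their size_t return types, that femu_ring_enqueue/femu_ring_dequeue return the number of object pointers actually transferred (rte_ring.c is authoritative):

#include <stdio.h>
#include "rte_ring.h"

int main(void)
{
    /* One poller thread producing and one FTL thread consuming would be
     * FEMU_RING_TYPE_SP_SC; MP_SC tolerates several producers. The count
     * is rounded internally, so any reasonable power of two works. */
    struct rte_ring *q = femu_ring_create(FEMU_RING_TYPE_MP_SC, 1024);
    if (q == NULL)
        return 1;

    int payload = 42;
    void *in[1] = { &payload };
    if (femu_ring_enqueue(q, in, 1) != 1)     /* producer side */
        fprintf(stderr, "ring full\n");

    void *out[1];
    if (femu_ring_dequeue(q, out, 1) == 1)    /* consumer side */
        printf("dequeued %d\n", *(int *)out[0]);

    printf("entries in flight: %zu\n", femu_ring_count(q));
    femu_ring_free(q);
    return 0;
}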