@@ -117,6 +117,21 @@ static __thread int sh_iov_static_fill_idx_share = 0;
117117 } \
118118} while (0)
119119
120+ /* Dirty-read sc->status first, then take the lock and re-check with the real read. */
121+ #define ACQUIRE_ZONE_TRY_LOCK(exp) do { \
122+     while (1) { \
123+         while (sc->status != exp) { \
124+             rte_pause(); \
125+         } \
126+         if (rte_spinlock_trylock(&sc->lock)) { \
127+             if (sc->status == exp) { \
128+                 break; \
129+             } \
130+             rte_spinlock_unlock(&sc->lock); \
131+         } \
132+     } \
133+ } while (0)
134+
120135#define RELEASE_ZONE_LOCK(s) do { \
121136 sc->status = s; \
122137 rte_spinlock_unlock(&sc->lock); \
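
The new ACQUIRE_ZONE_TRY_LOCK macro pairs a lock-free dirty read of sc->status with a trylock-and-recheck: the caller spins without holding the lock until the status looks right, then takes the spinlock and confirms the status before proceeding, so a stale dirty read never wins. Below is a minimal standalone sketch of the same pattern, for illustration only: pthread_mutex_trylock and sched_yield stand in for rte_spinlock_trylock and rte_pause, and the simplified zone struct is a stand-in for the shared sc zone used by the adapter, not the actual F-Stack definitions.

    /* Illustration only: dirty-read + trylock acquire, as in ACQUIRE_ZONE_TRY_LOCK. */
    #include <pthread.h>
    #include <sched.h>

    enum zone_status { SC_IDLE, SC_REQ, SC_REP };

    struct zone {
        pthread_mutex_t lock;                 /* stand-in for rte_spinlock_t */
        volatile enum zone_status status;
    };

    static void
    acquire_zone_try_lock(struct zone *sc, enum zone_status exp)
    {
        for (;;) {
            /* Dirty read: spin without the lock until the status looks right. */
            while (sc->status != exp)
                sched_yield();                /* stand-in for rte_pause() */

            /* Lock and re-check: the status may have changed between the
             * dirty read and the trylock. */
            if (pthread_mutex_trylock(&sc->lock) == 0) {
                if (sc->status == exp)
                    return;                   /* acquired with status confirmed */
                pthread_mutex_unlock(&sc->lock);
            }
        }
    }

    static void
    release_zone_lock(struct zone *sc, enum zone_status next)
    {
        sc->status = next;                    /* publish the next state... */
        pthread_mutex_unlock(&sc->lock);      /* ...then drop the lock, as RELEASE_ZONE_LOCK does */
    }

In the patch below, the request side still uses the blocking ACQUIRE_ZONE_LOCK(FF_SC_IDLE) to submit a request, while the polling-mode epoll_wait hook uses ACQUIRE_ZONE_TRY_LOCK(FF_SC_REP) to pick up the reply without sleeping on a semaphore.
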
@@ -1724,6 +1739,128 @@ ff_hook_epoll_ctl(int epfd, int op, int fd,
17241739    RETURN_NOFREE();
17251740}
17261741
1742+ /*
1743+  * Epoll polling mode: we do not use sem_wait.
1744+  *
1745+  * Note: CPU usage is 100%, but the RTT latency is very low.
1746+  * The first version does not support ff_linux_epoll_wait,
1747+  * because ff_linux_epoll_wait would introduce extra latency.
1748+  * This ff_linux_epoll_wait limitation will be addressed in the future.
1749+  */
1750+ #if defined(FF_PRELOAD_POLLING_MODE) && !defined(FF_KERNEL_EVENT)
1751+ int
1752+ ff_hook_epoll_wait(int epfd, struct epoll_event *events,
1753+     int maxevents, int timeout)
1754+ {
1755+     DEBUG_LOG("ff_hook_epoll_wait, epfd:%d, maxevents:%d, timeout:%d\n", epfd, maxevents, timeout);
1756+     int fd = epfd;
1757+
1758+     CHECK_FD_OWNERSHIP(epoll_wait, (epfd, events, maxevents, timeout));
1759+
1760+     DEFINE_REQ_ARGS_STATIC(epoll_wait);
1761+     static __thread struct epoll_event *sh_events = NULL;
1762+     static __thread int sh_events_len = 0;
1763+     struct timespec t_s, t_n;
1764+     time_t now_time_ms = 0;
1765+     time_t end_time_ms = 0;
1766+
1767+     if (sh_events == NULL || sh_events_len < maxevents) {
1768+         if (sh_events) {
1769+             share_mem_free(sh_events);
1770+         }
1771+
1772+         sh_events_len = maxevents;
1773+         sh_events = share_mem_alloc(sizeof(struct epoll_event) * sh_events_len);
1774+         if (sh_events == NULL) {
1775+             RETURN_ERROR_NOFREE(ENOMEM);
1776+         }
1777+     }
1778+
1779+     if (timeout > 0) {
1780+         if (clock_gettime(CLOCK_MONOTONIC_COARSE, &t_s) == -1) {
1781+             ret = -1;
1782+             goto epoll_exit;
1783+         }
1784+         end_time_ms = t_s.tv_sec * 1000 + t_s.tv_nsec / 1000000 + timeout;
1785+     }
1786+
1787+     args->epfd = fd;
1788+     args->events = sh_events;
1789+     args->maxevents = maxevents;
1790+     args->timeout = timeout;
1791+
1792+ retry:
1793+     ACQUIRE_ZONE_LOCK(FF_SC_IDLE);
1794+     sc->ops = FF_SO_EPOLL_WAIT;
1795+     sc->args = args;
1796+
1797+     /*
1798+      * sc->result and sc->error must be reset in epoll_wait and kevent;
1799+      * otherwise we could read the previous sc call's result.
1800+      */
1801+     sc->result = 0;
1802+     sc->error = 0;
1803+     errno = 0;
1804+     RELEASE_ZONE_LOCK(FF_SC_REQ);
1805+
1806+     do {
1807+         /*
1808+          * Busy-poll until the FreeBSD stack (ff_sys_epoll_wait) has serviced the request.
1809+          */
1810+         ACQUIRE_ZONE_TRY_LOCK(FF_SC_REP);
1811+         ret = sc->result;
1812+         if (ret < 0) {
1813+             errno = sc->error;
1814+         }
1815+         RELEASE_ZONE_LOCK(FF_SC_IDLE);
1816+         if (ret < 0) {
1817+             DEBUG_LOG("call ff_sys_epoll_wait error, ret:%d, errno:%d\n", ret, errno);
1818+             goto epoll_exit;
1819+         }
1820+         else if (ret > 0) {
1821+             goto epoll_exit;
1822+         }
1823+
1824+         if (timeout == 0) {
1825+             goto epoll_exit;
1826+         }
1827+         else {
1828+             if (timeout > 0) {
1829+                 clock_gettime(CLOCK_MONOTONIC_COARSE, &t_n);
1830+                 now_time_ms = t_n.tv_sec * 1000 + t_n.tv_nsec / 1000000;
1831+
1832+                 if (now_time_ms >= end_time_ms) {
1833+                     goto epoll_exit;
1834+                 }
1835+             }
1836+
1837+             goto retry;
1838+         }
1839+     } while (true);
1840+
1841+ epoll_exit:
1842+     if (likely(ret > 0)) {
1843+         if (unlikely(ret > maxevents)) {
1844+             ERR_LOG("return events:%d, maxevents:%d, clamping return events to maxevents; some error may have occurred\n",
1845+                 ret, maxevents);
1846+             ret = maxevents;
1847+         }
1848+         rte_memcpy(events, sh_events, sizeof(struct epoll_event) * ret);
1849+     }
1850+
1851+     /*
1852+      * Don't free sh_events here, to improve performance.
1853+      * This leaks the buffer if the APP exits while the fstack adapter does not,
1854+      * so it could be made a global variable and freed in thread_destructor.
1855+      */
1856+     /*if (sh_events) {
1857+         share_mem_free(sh_events);
1858+         sh_events = NULL;
1859+     }*/
1860+
1861+     RETURN_NOFREE();
1862+ }
1863+ #else
17271864int
17281865ff_hook_epoll_wait(int epfd, struct epoll_event *events,
17291866    int maxevents, int timeout)
@@ -1913,6 +2050,7 @@ ff_hook_epoll_wait(int epfd, struct epoll_event *events,
19132050
19142051    RETURN_NOFREE();
19152052}
2053+ #endif
19162054
19172055pid_t
19182056ff_hook_fork (void )
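
For timeout > 0, the polling-mode ff_hook_epoll_wait above reduces to a poll-until-deadline loop on CLOCK_MONOTONIC_COARSE, with the deadline precomputed in milliseconds. The sketch below shows that shape in isolation; poll_once is a hypothetical placeholder for one FF_SC_REQ/FF_SC_REP round trip against the shared zone and is not part of the patch.

    /* Illustration only: the millisecond-deadline loop used for timeout > 0. */
    #include <time.h>

    static long
    now_ms(void)
    {
        struct timespec ts;

        /* CLOCK_MONOTONIC_COARSE matches the patch: cheap to read, and
         * millisecond resolution is enough for epoll timeouts. */
        if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == -1)
            return -1;
        return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    static int
    poll_with_timeout(int timeout_ms, int (*poll_once)(void))
    {
        long end_ms = 0;

        if (timeout_ms > 0)
            end_ms = now_ms() + timeout_ms;

        for (;;) {
            int n = poll_once();              /* <0 error, 0 no events, >0 events */

            if (n != 0 || timeout_ms == 0)
                return n;                     /* error, events ready, or non-blocking poll */

            /* timeout < 0 polls forever; timeout > 0 polls until the deadline. */
            if (timeout_ms > 0 && now_ms() >= end_ms)
                return 0;
        }
    }
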