/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * pg_input.c: buffer generator input
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * To be honest, the packet generator needs an extreme makeover. Two
 * key assumptions which drove the current implementation are no
 * longer true: first, that buffer managers implement a post-TX
 * recycle list; second, that packet generator performance is
 * first-order important.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/devices.h>

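/*
 * Check buffer contents against the stream's fixed packet data: only
 * the bits set in the per-byte mask are compared, and n_bytes is
 * clamped to the end of the fixed data vector. Returns 1 on match;
 * on mismatch, logs the differing index plus actual/mask/expected
 * bytes and returns 0. Debug-only: callers invoke it inside ASSERT.
 */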
static int
validate_buffer_data2 (vlib_buffer_t * b, pg_stream_t * s,
		       u32 data_offset, u32 n_bytes)
{
  u8 *bd, *pd, *pm;
  u32 i;

  bd = b->data;
  pd = s->fixed_packet_data + data_offset;
  pm = s->fixed_packet_data_mask + data_offset;

  if (pd + n_bytes >= vec_end (s->fixed_packet_data))
    n_bytes = (pd < vec_end (s->fixed_packet_data)
	       ? vec_end (s->fixed_packet_data) - pd : 0);

  for (i = 0; i < n_bytes; i++)
    if ((bd[i] & pm[i]) != pd[i])
      break;

  if (i >= n_bytes)
    return 1;

  clib_warning ("buffer %U", format_vnet_buffer, b);
  clib_warning ("differ at index %d", i);
  clib_warning ("is     %U", format_hex_bytes, bd, n_bytes);
  clib_warning ("mask   %U", format_hex_bytes, pm, n_bytes);
  clib_warning ("expect %U", format_hex_bytes, pd, n_bytes);
  return 0;
}

static int
validate_buffer_data (vlib_buffer_t * b, pg_stream_t * s)
{
  return validate_buffer_data2 (b, s, 0, s->buffer_bytes);
}

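/* Store one 8/16/32/64-bit value at a0, optionally converting to
   network byte order; multi-byte widths use unaligned stores since
   a0 points into packet data. set_2 below is the two-buffer twin. */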
always_inline void
set_1 (void *a0,
       u64 v0, u64 v_min, u64 v_max, u32 n_bits, u32 is_net_byte_order)
{
  ASSERT (v0 >= v_min && v0 <= v_max);
  if (n_bits == BITS (u8))
    {
      ((u8 *) a0)[0] = v0;
    }
  else if (n_bits == BITS (u16))
    {
      if (is_net_byte_order)
	v0 = clib_host_to_net_u16 (v0);
      clib_mem_unaligned (a0, u16) = v0;
    }
  else if (n_bits == BITS (u32))
    {
      if (is_net_byte_order)
	v0 = clib_host_to_net_u32 (v0);
      clib_mem_unaligned (a0, u32) = v0;
    }
  else if (n_bits == BITS (u64))
    {
      if (is_net_byte_order)
	v0 = clib_host_to_net_u64 (v0);
      clib_mem_unaligned (a0, u64) = v0;
    }
}

always_inline void
set_2 (void *a0, void *a1,
       u64 v0, u64 v1,
       u64 v_min, u64 v_max,
       u32 n_bits, u32 is_net_byte_order, u32 is_increment)
{
  ASSERT (v0 >= v_min && v0 <= v_max);
  ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
  if (n_bits == BITS (u8))
    {
      ((u8 *) a0)[0] = v0;
      ((u8 *) a1)[0] = v1;
    }
  else if (n_bits == BITS (u16))
    {
      if (is_net_byte_order)
	{
	  v0 = clib_host_to_net_u16 (v0);
	  v1 = clib_host_to_net_u16 (v1);
	}
      clib_mem_unaligned (a0, u16) = v0;
      clib_mem_unaligned (a1, u16) = v1;
    }
  else if (n_bits == BITS (u32))
    {
      if (is_net_byte_order)
	{
	  v0 = clib_host_to_net_u32 (v0);
	  v1 = clib_host_to_net_u32 (v1);
	}
      clib_mem_unaligned (a0, u32) = v0;
      clib_mem_unaligned (a1, u32) = v1;
    }
  else if (n_bits == BITS (u64))
    {
      if (is_net_byte_order)
	{
	  v0 = clib_host_to_net_u64 (v0);
	  v1 = clib_host_to_net_u64 (v1);
	}
      clib_mem_unaligned (a0, u64) = v0;
      clib_mem_unaligned (a1, u64) = v1;
    }
}

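/*
 * Write the fixed value v_min at byte_offset into every buffer.
 * Buffers are processed two at a time while the next two are
 * prefetched for write, hence the "n_buffers >= 4" loop bound with a
 * step of 2.
 */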
static_always_inline void
do_set_fixed (pg_main_t * pg,
	      pg_stream_t * s,
	      u32 * buffers,
	      u32 n_buffers,
	      u32 n_bits,
	      u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)
{
  vlib_main_t *vm = vlib_get_main ();

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }
}

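/*
 * Write an incrementing value, wrapping from v_max back to v_min.
 * If want_sum is set, also accumulate the sum of the values written
 * (used to bump interface byte counters when editing packet
 * lengths). Returns the next value to write on the following call.
 */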
static_always_inline u64
do_set_increment (pg_main_t * pg,
		  pg_stream_t * s,
		  u32 * buffers,
		  u32 n_buffers,
		  u32 n_bits,
		  u32 byte_offset,
		  u32 is_net_byte_order,
		  u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max, u64 v)
{
  vlib_main_t *vm = vlib_get_main ();
  u64 sum = 0;

  ASSERT (v >= v_min && v <= v_max);

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      v_old = v;
      v = v_old + 2;
      v = v > v_max ? v_min : v;
      set_2 (a0, a1,
	     v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 1);

      if (want_sum)
	sum += 2 * v_old + 1;

      if (PREDICT_FALSE (v_old + 1 > v_max))
	{
	  if (want_sum)
	    sum -= 2 * v_old + 1;

	  v = v_old;
	  set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
	  if (want_sum)
	    sum += v;
	  v += 1;

	  v = v > v_max ? v_min : v;
	  set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);
	  if (want_sum)
	    sum += v;
	  v += 1;
	}

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      v_old = v;
      if (want_sum)
	sum += v_old;
      v += 1;
      v = v > v_max ? v_min : v;

      ASSERT (v_old >= v_min && v_old <= v_max);
      set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }

  if (want_sum)
    *sum_result = sum;

  return v;
}

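/*
 * Write pseudo-random values in [v_min, v_max]. A power-of-2-sized
 * random amount (r & r_mask) is added to the running value; since
 * r_mask + 1 can be up to twice v_diff, at most two conditional
 * subtractions of v_diff bring the result back in range. Worked
 * example: v_min = 0, v_max = 5 gives v_diff = 6 and r_mask = 7, so
 * the worst case is 5 + 7 = 12, and 12 - 6 - 6 = 0 is in range after
 * exactly two subtractions.
 */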
static_always_inline void
do_set_random (pg_main_t * pg,
	       pg_stream_t * s,
	       u32 * buffers,
	       u32 n_buffers,
	       u32 n_bits,
	       u32 byte_offset,
	       u32 is_net_byte_order,
	       u32 want_sum, u64 * sum_result, u64 v_min, u64 v_max)
{
  vlib_main_t *vm = vlib_get_main ();
  u64 v_diff = v_max - v_min + 1;
  u64 r_mask = max_pow2 (v_diff) - 1;
  u64 v0, v1;
  u64 sum = 0;
  void *random_data;

  random_data = clib_random_buffer_get_data
    (&vm->random_buffer, n_buffers * n_bits / BITS (u8));

  v0 = v1 = v_min;

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 r0 = 0, r1 = 0;	/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      switch (n_bits)
	{
#define _(n)					\
	  case BITS (u##n):			\
	    {					\
	      u##n * r = random_data;		\
	      r0 = r[0];			\
	      r1 = r[1];			\
	      random_data = r + 2;		\
	    }					\
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;
      v1 += r1 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;

      if (want_sum)
	sum += v0 + v1;

      set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
	     /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 r0 = 0;		/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      switch (n_bits)
	{
#define _(n)					\
	  case BITS (u##n):			\
	    {					\
	      u##n * r = random_data;		\
	      r0 = r[0];			\
	      random_data = r + 1;		\
	    }					\
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v0 = v0 > v_max ? v0 - v_diff : v0;

      if (want_sum)
	sum += v0;

      set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);

      ASSERT (validate_buffer_data (b0, s));
    }

  if (want_sum)
    *sum_result = sum;
}

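/*
 * Read-modify-write helper used by setbits_1/setbits_2 below: load
 * the word in network byte order, clear the masked bits, OR in the
 * shifted value, and store it back in network byte order.
 */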
#define _(i,t)							\
  clib_mem_unaligned (a##i, t) =				\
    clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask)	\
			  | (v##i << shift))

always_inline void
setbits_1 (void *a0,
	   u64 v0,
	   u64 v_min, u64 v_max,
	   u32 max_bits, u32 n_bits, u64 mask, u32 shift)
{
  ASSERT (v0 >= v_min && v0 <= v_max);
  if (max_bits == BITS (u8))
    ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);

  else if (max_bits == BITS (u16))
    {
      _(0, u16);
    }
  else if (max_bits == BITS (u32))
    {
      _(0, u32);
    }
  else if (max_bits == BITS (u64))
    {
      _(0, u64);
    }
}

always_inline void
setbits_2 (void *a0, void *a1,
	   u64 v0, u64 v1,
	   u64 v_min, u64 v_max,
	   u32 max_bits, u32 n_bits, u64 mask, u32 shift, u32 is_increment)
{
  ASSERT (v0 >= v_min && v0 <= v_max);
  ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
  if (max_bits == BITS (u8))
    {
      ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
      ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
    }

  else if (max_bits == BITS (u16))
    {
      _(0, u16);
      _(1, u16);
    }
  else if (max_bits == BITS (u32))
    {
      _(0, u32);
      _(1, u32);
    }
  else if (max_bits == BITS (u64))
    {
      _(0, u64);
      _(1, u64);
    }
}

#undef _

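/*
 * The do_setbits_* variants below mirror do_set_fixed / increment /
 * random, but handle fields that are not byte aligned or not exactly
 * 8/16/32/64 bits wide: the value is placed read-modify-write with a
 * mask and shift inside the smallest power-of-2 container that
 * covers the field (see do_it for the mask/shift computation).
 */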
static_always_inline void
do_setbits_fixed (pg_main_t * pg,
		  pg_stream_t * s,
		  u32 * buffers,
		  u32 n_buffers,
		  u32 max_bits,
		  u32 n_bits,
		  u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      setbits_2 (a0, a1,
		 v_min, v_min, v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
      ASSERT (validate_buffer_data (b0, s));
    }
}

static_always_inline u64
do_setbits_increment (pg_main_t * pg,
		      pg_stream_t * s,
		      u32 * buffers,
		      u32 n_buffers,
		      u32 max_bits,
		      u32 n_bits,
		      u32 byte_offset,
		      u64 v_min, u64 v_max, u64 v, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();

  ASSERT (v >= v_min && v <= v_max);

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      v_old = v;
      v = v_old + 2;
      v = v > v_max ? v_min : v;
      setbits_2 (a0, a1,
		 v_old + 0, v_old + 1,
		 v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 1);

      if (PREDICT_FALSE (v_old + 1 > v_max))
	{
	  v = v_old;
	  setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
	  v += 1;

	  v = v > v_max ? v_min : v;
	  setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
	  v += 1;
	}
      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 v_old;

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      v_old = v;
      v = v_old + 1;
      v = v > v_max ? v_min : v;

      ASSERT (v_old >= v_min && v_old <= v_max);
      setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);

      ASSERT (validate_buffer_data (b0, s));
    }

  return v;
}

static_always_inline void
do_setbits_random (pg_main_t * pg,
		   pg_stream_t * s,
		   u32 * buffers,
		   u32 n_buffers,
		   u32 max_bits,
		   u32 n_bits,
		   u32 byte_offset, u64 v_min, u64 v_max, u64 mask, u32 shift)
{
  vlib_main_t *vm = vlib_get_main ();
  u64 v_diff = v_max - v_min + 1;
  u64 r_mask = max_pow2 (v_diff) - 1;
  u64 v0, v1;
  void *random_data;

  random_data = clib_random_buffer_get_data
    (&vm->random_buffer, n_buffers * max_bits / BITS (u8));
  v0 = v1 = v_min;

  while (n_buffers >= 4)
    {
      vlib_buffer_t *b0, *b1, *b2, *b3;
      void *a0, *a1;
      u64 r0 = 0, r1 = 0;	/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      b2 = vlib_get_buffer (vm, buffers[2]);
      b3 = vlib_get_buffer (vm, buffers[3]);
      buffers += 2;
      n_buffers -= 2;

      a0 = (void *) b0 + byte_offset;
      a1 = (void *) b1 + byte_offset;
      CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
      CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

      switch (max_bits)
	{
#define _(n)					\
	  case BITS (u##n):			\
	    {					\
	      u##n * r = random_data;		\
	      r0 = r[0];			\
	      r1 = r[1];			\
	      random_data = r + 2;		\
	    }					\
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;
      v1 += r1 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v1 = v1 > v_max ? v1 - v_diff : v1;

      setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
		 /* is_increment */ 0);

      ASSERT (validate_buffer_data (b0, s));
      ASSERT (validate_buffer_data (b1, s));
    }

  while (n_buffers > 0)
    {
      vlib_buffer_t *b0;
      void *a0;
      u64 r0 = 0;		/* warnings be gone */

      b0 = vlib_get_buffer (vm, buffers[0]);
      buffers += 1;
      n_buffers -= 1;

      a0 = (void *) b0 + byte_offset;

      switch (max_bits)
	{
#define _(n)					\
	  case BITS (u##n):			\
	    {					\
	      u##n * r = random_data;		\
	      r0 = r[0];			\
	      random_data = r + 1;		\
	    }					\
	  break;

	  _(8);
	  _(16);
	  _(32);
	  _(64);

#undef _
	}

      /* Add power of 2 sized random number which may be out of range. */
      v0 += r0 & r_mask;

      /* Twice should be enough to reduce to v_min .. v_max range. */
      v0 = v0 > v_max ? v0 - v_diff : v0;
      v0 = v0 > v_max ? v0 - v_diff : v0;

      setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);

      ASSERT (validate_buffer_data (b0, s));
    }
}

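/*
 * Apply one edit to a vector of buffers. Byte-aligned fields whose
 * width is exactly 8/16/32/64 bits (l1 == 0 && h1 == 0) take the
 * fast do_set_* path; anything else falls through to the masked
 * do_setbits_* path, with mask and shift aligned to the power-of-2
 * container. A degenerate range (v_min == v_max) is demoted to a
 * fixed edit up front.
 */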
static u64
do_it (pg_main_t * pg,
       pg_stream_t * s,
       u32 * buffers,
       u32 n_buffers,
       u32 lo_bit, u32 hi_bit,
       u64 v_min, u64 v_max, u64 v, pg_edit_type_t edit_type)
{
  u32 max_bits, l0, l1, h1, start_bit;

  if (v_min == v_max)
    edit_type = PG_EDIT_FIXED;

  l0 = lo_bit / BITS (u8);
  l1 = lo_bit % BITS (u8);
  h1 = hi_bit % BITS (u8);

  start_bit = l0 * BITS (u8);

  max_bits = hi_bit - start_bit;
  ASSERT (max_bits <= 64);

#define _(n)						\
  case (n):						\
    if (edit_type == PG_EDIT_INCREMENT)			\
      v = do_set_increment (pg, s, buffers, n_buffers,	\
			    BITS (u##n),		\
			    l0,				\
			    /* is_net_byte_order */ 1,	\
			    /* want sum */ 0, 0,	\
			    v_min, v_max,		\
			    v);				\
    else if (edit_type == PG_EDIT_RANDOM)		\
      do_set_random (pg, s, buffers, n_buffers,		\
		     BITS (u##n),			\
		     l0,				\
		     /* is_net_byte_order */ 1,		\
		     /* want sum */ 0, 0,		\
		     v_min, v_max);			\
    else /* edit_type == PG_EDIT_FIXED */		\
      do_set_fixed (pg, s, buffers, n_buffers,		\
		    BITS (u##n),			\
		    l0,					\
		    /* is_net_byte_order */ 1,		\
		    v_min, v_max);			\
  goto done;

  if (l1 == 0 && h1 == 0)
    {
      switch (max_bits)
	{
	  _(8);
	  _(16);
	  _(32);
	  _(64);
	}
    }

#undef _

  {
    u64 mask;
    u32 shift = l1;
    u32 n_bits = max_bits;

    max_bits = clib_max (max_pow2 (n_bits), 8);

    mask = ((u64) 1 << (u64) n_bits) - 1;
    mask &= ~(((u64) 1 << (u64) shift) - 1);

    mask <<= max_bits - n_bits;
    shift += max_bits - n_bits;

    switch (max_bits)
      {
#define _(n)								\
	case (n):							\
	  if (edit_type == PG_EDIT_INCREMENT)				\
	    v = do_setbits_increment (pg, s, buffers, n_buffers,	\
				      BITS (u##n), n_bits,		\
				      l0, v_min, v_max, v,		\
				      mask, shift);			\
	  else if (edit_type == PG_EDIT_RANDOM)				\
	    do_setbits_random (pg, s, buffers, n_buffers,		\
			       BITS (u##n), n_bits,			\
			       l0, v_min, v_max,			\
			       mask, shift);				\
	  else /* edit_type == PG_EDIT_FIXED */				\
	    do_setbits_fixed (pg, s, buffers, n_buffers,		\
			      BITS (u##n), n_bits,			\
			      l0, v_min, v_max,				\
			      mask, shift);				\
	goto done;

	_(8);
	_(16);
	_(32);
	_(64);

#undef _
      }
  }

done:
  return v;
}

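/*
 * Set vlib_buffer_t current_length on each buffer according to the
 * stream's packet-size edit type. The do_set_* helpers are reused by
 * pointing byte_offset at the current_length field inside the buffer
 * header itself (host byte order, want_sum = 1); the resulting
 * length sum feeds the interface RX counters below.
 */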
static void
pg_generate_set_lengths (pg_main_t * pg,
			 pg_stream_t * s, u32 * buffers, u32 n_buffers)
{
  u64 v_min, v_max, length_sum;
  pg_edit_type_t edit_type;

  v_min = s->min_packet_bytes;
  v_max = s->max_packet_bytes;
  edit_type = s->packet_size_edit_type;

  if (edit_type == PG_EDIT_INCREMENT)
    s->last_increment_packet_size
      = do_set_increment (pg, s, buffers, n_buffers,
			  8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
			  STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
			  /* is_net_byte_order */ 0,
			  /* want sum */ 1, &length_sum,
			  v_min, v_max, s->last_increment_packet_size);

  else if (edit_type == PG_EDIT_RANDOM)
    do_set_random (pg, s, buffers, n_buffers,
		   8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
		   STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
		   /* is_net_byte_order */ 0,
		   /* want sum */ 1, &length_sum,
		   v_min, v_max);

  else				/* edit_type == PG_EDIT_FIXED */
    {
      do_set_fixed (pg, s, buffers, n_buffers,
		    8 * STRUCT_SIZE_OF (vlib_buffer_t, current_length),
		    STRUCT_OFFSET_OF (vlib_buffer_t, current_length),
		    /* is_net_byte_order */ 0,
		    v_min, v_max);
      length_sum = v_min * n_buffers;
    }

  {
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    vnet_sw_interface_t *si =
      vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);

    vlib_increment_combined_counter (im->combined_sw_if_counters
				     + VNET_INTERFACE_COUNTER_RX,
				     vlib_get_thread_index (),
				     si->sw_if_index, n_buffers, length_sum);
  }

}

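/*
 * For multi-buffer packets, walk each chain and distribute the whole
 * packet length (left in the head buffer's current_length by
 * pg_generate_set_lengths) across the chain segments, fixing up
 * VLIB_BUFFER_NEXT_PRESENT and freeing fully unused tail buffers.
 */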
static void
pg_generate_fix_multi_buffer_lengths (pg_main_t * pg,
				      pg_stream_t * s,
				      u32 * buffers, u32 n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();
  pg_buffer_index_t *pbi;
  uword n_bytes_left;
  static u32 *unused_buffers = 0;

  while (n_buffers > 0)
    {
      vlib_buffer_t *b;
      u32 bi;

      bi = buffers[0];
      b = vlib_get_buffer (vm, bi);

      /* Current length here is length of whole packet. */
      n_bytes_left = b->current_length;

      pbi = s->buffer_indices;
      while (1)
	{
	  uword n = clib_min (n_bytes_left, s->buffer_bytes);

	  b->current_length = n;
	  n_bytes_left -= n;
	  if (n_bytes_left > 0)
	    b->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  else
	    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

	  /* Return unused buffers to fifos. */
	  if (n == 0)
	    vec_add1 (unused_buffers, bi);

	  pbi++;
	  if (pbi >= vec_end (s->buffer_indices))
	    break;

	  bi = b->next_buffer;
	  b = vlib_get_buffer (vm, bi);
	}
      ASSERT (n_bytes_left == 0);

      buffers += 1;
      n_buffers -= 1;
    }

  if (vec_len (unused_buffers) > 0)
    {
      vlib_buffer_free_no_next (vm, unused_buffers, vec_len (unused_buffers));
      _vec_len (unused_buffers) = 0;
    }
}

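/*
 * Apply all non-fixed (increment/random) edits to the generated
 * buffers via do_it, then run any group edit functions, iterating
 * the edit groups from last to first, to finish computed fields such
 * as IP lengths and checksums.
 */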
static void
pg_generate_edit (pg_main_t * pg,
		  pg_stream_t * s, u32 * buffers, u32 n_buffers)
{
  pg_edit_t *e;

  vec_foreach (e, s->non_fixed_edits)
  {
    switch (e->type)
      {
      case PG_EDIT_RANDOM:
      case PG_EDIT_INCREMENT:
	{
	  u32 lo_bit, hi_bit;
	  u64 v_min, v_max;

	  v_min = pg_edit_get_value (e, PG_EDIT_LO);
	  v_max = pg_edit_get_value (e, PG_EDIT_HI);

	  hi_bit = (BITS (u8) * STRUCT_OFFSET_OF (vlib_buffer_t, data)
		    + BITS (u8) + e->lsb_bit_offset);
	  lo_bit = hi_bit - e->n_bits;

	  e->last_increment_value
	    = do_it (pg, s, buffers, n_buffers, lo_bit, hi_bit, v_min, v_max,
		     e->last_increment_value, e->type);
	}
	break;

      case PG_EDIT_UNSPECIFIED:
	break;

      default:
	/* Should not be any fixed edits left. */
	ASSERT (0);
	break;
      }
  }

  /* Call any edit functions to e.g. complete IP lengths, checksums, ... */
  {
    int i;
    for (i = vec_len (s->edit_groups) - 1; i >= 0; i--)
      {
	pg_edit_group_t *g = s->edit_groups + i;
	if (g->edit_function)
	  g->edit_function (pg, s, g, buffers, n_buffers);
      }
  }
}

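/* Chain each buffer to its counterpart in next_buffers by setting
   next_buffer and VLIB_BUFFER_NEXT_PRESENT, two at a time with
   prefetch. */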
static void
pg_set_next_buffer_pointers (pg_main_t * pg,
			     pg_stream_t * s,
			     u32 * buffers, u32 * next_buffers, u32 n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();

  while (n_buffers >= 4)
    {
      u32 ni0, ni1;
      vlib_buffer_t *b0, *b1;

      b0 = vlib_get_buffer (vm, buffers[0]);
      b1 = vlib_get_buffer (vm, buffers[1]);
      ni0 = next_buffers[0];
      ni1 = next_buffers[1];

      vlib_prefetch_buffer_with_index (vm, buffers[2], WRITE);
      vlib_prefetch_buffer_with_index (vm, buffers[3], WRITE);

      b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b1->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b0->next_buffer = ni0;
      b1->next_buffer = ni1;

      buffers += 2;
      next_buffers += 2;
      n_buffers -= 2;
    }

  while (n_buffers > 0)
    {
      u32 ni0;
      vlib_buffer_t *b0;

      b0 = vlib_get_buffer (vm, buffers[0]);
      ni0 = next_buffers[0];
      buffers += 1;
      next_buffers += 1;
      n_buffers -= 1;

      b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b0->next_buffer = ni0;
    }
}

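/*
 * (Re)initialize freshly allocated buffers: set the stream's buffer
 * flags and RX/TX sw_if_index, then either copy the stream's fixed
 * packet data into each buffer (set_data = 1) or assert that the
 * data already present still matches it. data_offset selects the
 * slice of the fixed data belonging to this position in the chain.
 */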
static_always_inline void
init_buffers_inline (vlib_main_t * vm,
		     pg_stream_t * s,
		     u32 * buffers,
		     u32 n_buffers, u32 data_offset, u32 n_data, u32 set_data)
{
  u32 n_left, *b;
  u8 *data, *mask;

  ASSERT (s->replay_packet_templates == 0);

  data = s->fixed_packet_data + data_offset;
  mask = s->fixed_packet_data_mask + data_offset;
  if (data + n_data >= vec_end (s->fixed_packet_data))
    n_data = (data < vec_end (s->fixed_packet_data)
	      ? vec_end (s->fixed_packet_data) - data : 0);
  if (n_data > 0)
    {
      ASSERT (data + n_data <= vec_end (s->fixed_packet_data));
      ASSERT (mask + n_data <= vec_end (s->fixed_packet_data_mask));
    }

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 4)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;

      /* Prefetch next iteration. */
      vlib_prefetch_buffer_with_index (vm, b[2], STORE);
      vlib_prefetch_buffer_with_index (vm, b[3], STORE);

      bi0 = b[0];
      bi1 = b[1];
      b += 2;
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b0->flags |= s->buffer_flags;
      b1->flags |= s->buffer_flags;

      vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	vnet_buffer (b1)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];

      vnet_buffer (b0)->sw_if_index[VLIB_TX] =
	vnet_buffer (b1)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];

      if (set_data)
	{
	  clib_memcpy_fast (b0->data, data, n_data);
	  clib_memcpy_fast (b1->data, data, n_data);
	}
      else
	{
	  ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
	  ASSERT (validate_buffer_data2 (b1, s, data_offset, n_data));
	}
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;

      bi0 = b[0];
      b += 1;
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      b0->flags |= s->buffer_flags;
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];

      if (set_data)
	clib_memcpy_fast (b0->data, data, n_data);
      else
	ASSERT (validate_buffer_data2 (b0, s, data_offset, n_data));
    }
}

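/*
 * Allocate and initialize one buffer level of the stream's chains,
 * wiring next-buffer pointers to the previously filled level if
 * given. For the head-of-packet level this also runs the length,
 * multi-buffer fixup and edit passes. Returns the number of buffers
 * actually allocated, which may be less than requested.
 */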
static u32
pg_stream_fill_helper (pg_main_t * pg,
		       pg_stream_t * s,
		       pg_buffer_index_t * bi,
		       u32 * buffers, u32 * next_buffers, u32 n_alloc)
{
  vlib_main_t *vm = vlib_get_main ();
  uword is_start_of_packet = bi == s->buffer_indices;
  u32 n_allocated;

  ASSERT (vec_len (s->replay_packet_templates) == 0);

  n_allocated = vlib_buffer_alloc (vm, buffers, n_alloc);
  if (n_allocated == 0)
    return 0;

  /*
   * We can't assume we got all the buffers we asked for;
   * short allocations were not handled correctly until recently.
   */
  n_alloc = n_allocated;

  /* Reinitialize buffers */
  init_buffers_inline
    (vm, s,
     buffers,
     n_alloc, (bi - s->buffer_indices) * s->buffer_bytes /* data offset */ ,
     s->buffer_bytes,
     /* set_data */ 1);

  if (next_buffers)
    pg_set_next_buffer_pointers (pg, s, buffers, next_buffers, n_alloc);

  if (is_start_of_packet)
    {
      pg_generate_set_lengths (pg, s, buffers, n_alloc);
      if (vec_len (s->buffer_indices) > 1)
	pg_generate_fix_multi_buffer_lengths (pg, s, buffers, n_alloc);

      pg_generate_edit (pg, s, buffers, n_alloc);
    }

  return n_alloc;
}

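/*
 * Fill path for pcap replay streams: size the allocation from the
 * templates about to be replayed, copy each template into one or
 * more chained buffers, enqueue the head buffers on the stream's
 * fifo, and bump the interface RX counters. Returns n_alloc, or 0 on
 * allocation failure.
 */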
static u32
pg_stream_fill_replay (pg_main_t * pg, pg_stream_t * s, u32 n_alloc)
{
  pg_buffer_index_t *bi;
  u32 n_left, i, l;
  u32 buffer_alloc_request = 0;
  u32 buffer_alloc_result;
  u32 current_buffer_index;
  u32 *buffers;
  vlib_main_t *vm = vlib_get_main ();
  vnet_main_t *vnm = vnet_get_main ();
  u32 buf_sz = vlib_buffer_get_default_data_size (vm);
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_sw_interface_t *si;

  buffers = pg->replay_buffers_by_thread[vm->thread_index];
  vec_reset_length (buffers);
  bi = s->buffer_indices;

  n_left = n_alloc;
  i = s->current_replay_packet_index;
  l = vec_len (s->replay_packet_templates);

  /* Figure out how many buffers we need */
  while (n_left > 0)
    {
      u8 *d0;

      d0 = vec_elt (s->replay_packet_templates, i);
      buffer_alloc_request += (vec_len (d0) + (buf_sz - 1)) / buf_sz;

      i = ((i + 1) == l) ? 0 : i + 1;
      n_left--;
    }

  ASSERT (buffer_alloc_request > 0);
  vec_validate (buffers, buffer_alloc_request - 1);

  /* Allocate that many buffers */
  buffer_alloc_result = vlib_buffer_alloc (vm, buffers, buffer_alloc_request);
  if (buffer_alloc_result < buffer_alloc_request)
    {
      clib_warning ("alloc failure, got %d not %d", buffer_alloc_result,
		    buffer_alloc_request);
      vlib_buffer_free_no_next (vm, buffers, buffer_alloc_result);
      pg->replay_buffers_by_thread[vm->thread_index] = buffers;
      return 0;
    }

  /* Now go generate the buffers, and add them to the FIFO */
  n_left = n_alloc;

  current_buffer_index = 0;
  i = s->current_replay_packet_index;
  l = vec_len (s->replay_packet_templates);
  while (n_left > 0)
    {
      u8 *d0;
      int not_last;
      u32 data_offset;
      u32 bytes_to_copy, bytes_this_chunk;
      vlib_buffer_t *b;

      d0 = vec_elt (s->replay_packet_templates, i);
      data_offset = 0;
      bytes_to_copy = vec_len (d0);

      /* Add head chunk to pg fifo */
      clib_fifo_add1 (bi->buffer_fifo, buffers[current_buffer_index]);

      /* Copy the data */
      while (bytes_to_copy)
	{
	  bytes_this_chunk = clib_min (bytes_to_copy, buf_sz);
	  ASSERT (current_buffer_index < vec_len (buffers));
	  b = vlib_get_buffer (vm, buffers[current_buffer_index]);
	  clib_memcpy_fast (b->data, d0 + data_offset, bytes_this_chunk);
	  vnet_buffer (b)->sw_if_index[VLIB_RX] = s->sw_if_index[VLIB_RX];
	  vnet_buffer (b)->sw_if_index[VLIB_TX] = s->sw_if_index[VLIB_TX];
	  b->flags = s->buffer_flags;
	  b->next_buffer = 0;
	  b->current_data = 0;
	  b->current_length = bytes_this_chunk;

	  not_last = bytes_this_chunk < bytes_to_copy;
	  if (not_last)
	    {
	      ASSERT (current_buffer_index < (vec_len (buffers) - 1));
	      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      b->next_buffer = buffers[current_buffer_index + 1];
	    }
	  bytes_to_copy -= bytes_this_chunk;
	  data_offset += bytes_this_chunk;
	  current_buffer_index++;
	}

      i = ((i + 1) == l) ? 0 : i + 1;
      n_left--;
    }

  /* Update the interface counters */
  si = vnet_get_sw_interface (vnm, s->sw_if_index[VLIB_RX]);
  l = 0;
  for (i = 0; i < n_alloc; i++)
    l += vlib_buffer_index_length_in_chain (vm, buffers[i]);
  vlib_increment_combined_counter (im->combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX,
				   vlib_get_thread_index (),
				   si->sw_if_index, n_alloc, l);

  s->current_replay_packet_index += n_alloc;
  s->current_replay_packet_index %= vec_len (s->replay_packet_templates);

  pg->replay_buffers_by_thread[vm->thread_index] = buffers;
  return n_alloc;
}


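/*
 * Top up the stream's buffer fifos so that at least n_buffers
 * packets are ready, respecting the stream's packet-count limit.
 * Replay streams are handed off to pg_stream_fill_replay; otherwise
 * each buffer level is filled back to front so that next-buffer
 * pointers can be wired to the level filled on the previous
 * iteration. Returns the number of packets now in the fifo.
 */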
static u32
pg_stream_fill (pg_main_t * pg, pg_stream_t * s, u32 n_buffers)
{
  pg_buffer_index_t *bi;
  word i, n_in_fifo, n_alloc, n_free, n_added;
  u32 *tail, *start, *end, *last_tail, *last_start;

  bi = s->buffer_indices;

  n_in_fifo = clib_fifo_elts (bi->buffer_fifo);
  if (n_in_fifo >= n_buffers)
    return n_in_fifo;

  n_alloc = n_buffers - n_in_fifo;

  /* Round up, but never generate more than limit. */
  n_alloc = clib_max (VLIB_FRAME_SIZE, n_alloc);

  if (s->n_packets_limit > 0
      && s->n_packets_generated + n_in_fifo + n_alloc >= s->n_packets_limit)
    {
      n_alloc = s->n_packets_limit - s->n_packets_generated - n_in_fifo;
      if (n_alloc < 0)
	n_alloc = 0;
    }

  /*
   * Handle pcap replay directly
   */
  if (s->replay_packet_templates)
    return pg_stream_fill_replay (pg, s, n_alloc);

  /* All buffer fifos should have the same size. */
  if (CLIB_DEBUG > 0)
    {
      uword l = ~0, e;
      vec_foreach (bi, s->buffer_indices)
      {
	e = clib_fifo_elts (bi->buffer_fifo);
	if (bi == s->buffer_indices)
	  l = e;
	ASSERT (l == e);
      }
    }

  last_tail = last_start = 0;
  n_added = n_alloc;

  for (i = vec_len (s->buffer_indices) - 1; i >= 0; i--)
    {
      bi = vec_elt_at_index (s->buffer_indices, i);

      n_free = clib_fifo_free_elts (bi->buffer_fifo);
      if (n_free < n_alloc)
	clib_fifo_resize (bi->buffer_fifo, n_alloc - n_free);

      tail = clib_fifo_advance_tail (bi->buffer_fifo, n_alloc);
      start = bi->buffer_fifo;
      end = clib_fifo_end (bi->buffer_fifo);

      if (tail + n_alloc <= end)
	{
	  n_added =
	    pg_stream_fill_helper (pg, s, bi, tail, last_tail, n_alloc);
	}
      else
	{
	  u32 n = clib_min (end - tail, n_alloc);
	  n_added = pg_stream_fill_helper (pg, s, bi, tail, last_tail, n);

	  if (n_added == n && n_alloc > n_added)
	    {
	      n_added += pg_stream_fill_helper
		(pg, s, bi, start, last_start, n_alloc - n_added);
	    }
	}

      if (PREDICT_FALSE (n_added < n_alloc))
	tail = clib_fifo_advance_tail (bi->buffer_fifo, n_added - n_alloc);

      last_tail = tail;
      last_start = start;

      /* Verify that pkts in the fifo are properly allocated */
    }

  return n_in_fifo + n_added;
}

typedef struct
{
  u32 stream_index;

  u32 packet_length;
  u32 sw_if_index;

  /* Use pre data for packet data. */
  vlib_buffer_t buffer;
} pg_input_trace_t;

static u8 *
format_pg_input_trace (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  pg_input_trace_t *t = va_arg (*va, pg_input_trace_t *);
  pg_main_t *pg = &pg_main;
  pg_stream_t *stream;
  vlib_node_t *n;
  u32 indent = format_get_indent (s);

  stream = 0;
  if (!pool_is_free_index (pg->streams, t->stream_index))
    stream = pool_elt_at_index (pg->streams, t->stream_index);

  if (stream)
    s = format (s, "stream %v", pg->streams[t->stream_index].name);
  else
    s = format (s, "stream %d", t->stream_index);

  s = format (s, ", %d bytes", t->packet_length);
  s = format (s, ", sw_if_index %d", t->sw_if_index);

  s = format (s, "\n%U%U",
	      format_white_space, indent, format_vnet_buffer, &t->buffer);

  s = format (s, "\n%U", format_white_space, indent);

  n = 0;
  if (stream)
    n = vlib_get_node (vm, stream->node_index);

  if (n && n->format_buffer)
    s = format (s, "%U", n->format_buffer,
		t->buffer.pre_data, sizeof (t->buffer.pre_data));
  else
    s = format (s, "%U",
		format_hex_bytes, t->buffer.pre_data,
		ARRAY_LEN (t->buffer.pre_data));
  return s;
}

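/* Add packet-generator traces for n_buffers buffers, capturing
   buffer metadata and the start of packet data in pre_data. */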
static void
pg_input_trace (pg_main_t * pg,
		vlib_node_runtime_t * node, u32 stream_index, u32 next_index,
		u32 * buffers, u32 n_buffers)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 *b, n_left;

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 2)
    {
      u32 bi0, bi1;
      vlib_buffer_t *b0, *b1;
      pg_input_trace_t *t0, *t1;

      bi0 = b[0];
      bi1 = b[1];
      b += 2;
      n_left -= 2;

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
      vlib_trace_buffer (vm, node, next_index, b1, /* follow_chain */ 1);

      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));

      t0->stream_index = stream_index;
      t1->stream_index = stream_index;

      t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
      t1->packet_length = vlib_buffer_length_in_chain (vm, b1);

      t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];

      clib_memcpy_fast (&t0->buffer, b0,
			sizeof (b0[0]) - sizeof (b0->pre_data));
      clib_memcpy_fast (&t1->buffer, b1,
			sizeof (b1[0]) - sizeof (b1->pre_data));

      clib_memcpy_fast (t0->buffer.pre_data, b0->data,
			sizeof (t0->buffer.pre_data));
      clib_memcpy_fast (t1->buffer.pre_data, b1->data,
			sizeof (t1->buffer.pre_data));
    }

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      pg_input_trace_t *t0;

      bi0 = b[0];
      b += 1;
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);

      vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 1);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));

      t0->stream_index = stream_index;
      t0->packet_length = vlib_buffer_length_in_chain (vm, b0);
      t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      clib_memcpy_fast (&t0->buffer, b0,
			sizeof (b0[0]) - sizeof (b0->pre_data));
      clib_memcpy_fast (t0->buffer.pre_data, b0->data,
			sizeof (t0->buffer.pre_data));
    }
}

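/*
 * Parse the ethernet/IP headers of each generated packet and set the
 * l2/l3/l4 header offsets plus checksum-offload flags; for TCP on a
 * GSO-enabled interface, chained buffers are also marked with
 * VNET_BUFFER_F_GSO and get gso_size and the TCP header size
 * recorded.
 */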
static_always_inline void
fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
			   int gso_enabled, u32 gso_size)
{
  for (int i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
      u8 l4_proto = 0;

      ethernet_header_t *eh =
	(ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);
      u16 l2hdr_sz = sizeof (ethernet_header_t);

      if (ethernet_frame_is_tagged (ethertype))
	{
	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	  if (ethertype == ETHERNET_TYPE_VLAN)
	    {
	      vlan++;
	      ethertype = clib_net_to_host_u16 (vlan->type);
	      l2hdr_sz += sizeof (*vlan);
	    }
	}

      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;

      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
	{
	  ip4_header_t *ip4 =
	    (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
	  l4_proto = ip4->protocol;
	  b0->flags |=
	    (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
	  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
			| VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
			VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
	}
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
	{
	  ip6_header_t *ip6 =
	    (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
	  /* FIXME IPv6 EH traversal */
	  l4_proto = ip6->protocol;
	  b0->flags |=
	    (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	     VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	     VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
	}

      if (l4_proto == IP_PROTOCOL_TCP)
	{
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;

	  /* only set GSO flag for chained buffers */
	  if (gso_enabled && (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      b0->flags |= VNET_BUFFER_F_GSO;
	      tcp_header_t *tcp =
		(tcp_header_t *) (vlib_buffer_get_current (b0) +
				  vnet_buffer (b0)->l4_hdr_offset);
	      vnet_buffer2 (b0)->gso_l4_hdr_sz = tcp_header_bytes (tcp);
	      vnet_buffer2 (b0)->gso_size = gso_size;
	    }
	}
      else if (l4_proto == IP_PROTOCOL_UDP)
	{
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
	}
    }
}

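/*
 * Move up to n_packets_to_generate packets from the stream's fifo
 * into frames for next_index. Frames destined for ethernet-input get
 * the single-sw_if_index scalar-args optimization; device-input
 * feature configs, offload flags and tracing are applied per frame.
 */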
static uword
pg_generate_packets (vlib_node_runtime_t * node,
		     pg_main_t * pg,
		     pg_stream_t * s, uword n_packets_to_generate)
{
  vlib_main_t *vm = vlib_get_main ();
  u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
  uword n_packets_generated;
  pg_buffer_index_t *bi, *bi0;
  u32 next_index = s->next_index;
  vnet_feature_main_t *fm = &feature_main;
  vnet_feature_config_main_t *cm;
  u8 feature_arc_index = fm->device_input_feature_arc_index;
  cm = &fm->feature_config_mains[feature_arc_index];
  u32 current_config_index = ~(u32) 0;
  pg_interface_t *pi = pool_elt_at_index (pg->interfaces, s->pg_if_index);
  int i;

  bi0 = s->buffer_indices;

  n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
  n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
  n_packets_generated = 0;

  if (PREDICT_FALSE
      (vnet_have_features (feature_arc_index, s->sw_if_index[VLIB_RX])))
    {
      current_config_index =
	vec_elt (cm->config_index_by_sw_if_index, s->sw_if_index[VLIB_RX]);
      vnet_get_config_data (&cm->config_main, &current_config_index,
			    &next_index, 0);
    }

  while (n_packets_to_generate > 0)
    {
      u32 *head, *start, *end;

      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
	  vlib_next_frame_t *nf;
	  vlib_frame_t *f;
	  ethernet_input_frame_t *ef;
	  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left);
	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
	  f = vlib_get_frame (vm, nf->frame);
	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

	  ef = vlib_frame_scalar_args (f);
	  ef->sw_if_index = pi->sw_if_index;
	  ef->hw_if_index = pi->hw_if_index;
	  vlib_frame_no_append (f);
	}
      else
	vlib_get_next_frame (vm, node, next_index, to_next, n_left);

      n_this_frame = n_packets_to_generate;
      if (n_this_frame > n_left)
	n_this_frame = n_left;

      start = bi0->buffer_fifo;
      end = clib_fifo_end (bi0->buffer_fifo);
      head = clib_fifo_head (bi0->buffer_fifo);

      if (head + n_this_frame <= end)
	vlib_buffer_copy_indices (to_next, head, n_this_frame);
      else
	{
	  u32 n = end - head;
	  vlib_buffer_copy_indices (to_next + 0, head, n);
	  vlib_buffer_copy_indices (to_next + n, start, n_this_frame - n);
	}

      if (s->replay_packet_templates == 0)
	{
	  vec_foreach (bi, s->buffer_indices)
	    clib_fifo_advance_head (bi->buffer_fifo, n_this_frame);
	}
      else
	{
	  clib_fifo_advance_head (bi0->buffer_fifo, n_this_frame);
	}

      if (current_config_index != ~(u32) 0)
	for (i = 0; i < n_this_frame; i++)
	  {
	    vlib_buffer_t *b;
	    b = vlib_get_buffer (vm, to_next[i]);
	    b->current_config_index = current_config_index;
	    vnet_buffer (b)->feature_arc_index = feature_arc_index;
	  }

      if (pi->gso_enabled ||
	  (s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
			      VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
			      VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
	{
	  fill_buffer_offload_flags (vm, to_next, n_this_frame,
				     pi->gso_enabled, pi->gso_size);
	}

      n_trace = vlib_get_trace_count (vm, node);
      if (n_trace > 0)
	{
	  u32 n = clib_min (n_trace, n_this_frame);
	  pg_input_trace (pg, node, s - pg->streams, next_index, to_next, n);
	  vlib_set_trace_count (vm, node, n_trace - n);
	}
      n_packets_to_generate -= n_this_frame;
      n_packets_generated += n_this_frame;
      n_left -= n_this_frame;
      if (CLIB_DEBUG > 0)
	{
	  int i;
	  vlib_buffer_t *b;

	  for (i = 0; i < n_this_frame; i++)
	    {
	      b = vlib_get_buffer (vm, to_next[i]);
	      ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0 ||
		      b->current_length >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
	    }
	}
      vlib_put_next_frame (vm, node, next_index, n_left);
    }

  return n_packets_generated;
}

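/*
 * Per-stream input: apply the rate limit via a fractional packet
 * accumulator, clamp to the packet-count limit and to one frame's
 * worth, then generate. Disables the stream once its limit has been
 * reached.
 */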
static uword
pg_input_stream (vlib_node_runtime_t * node, pg_main_t * pg, pg_stream_t * s)
{
  vlib_main_t *vm = vlib_get_main ();
  uword n_packets;
  f64 time_now, dt;

  if (s->n_packets_limit > 0 && s->n_packets_generated >= s->n_packets_limit)
    {
      pg_stream_enable_disable (pg, s, /* want_enabled */ 0);
      return 0;
    }

  /* Apply rate limit. */
  time_now = vlib_time_now (vm);
  if (s->time_last_generate == 0)
    s->time_last_generate = time_now;

  dt = time_now - s->time_last_generate;
  s->time_last_generate = time_now;

  n_packets = VLIB_FRAME_SIZE;
  if (s->rate_packets_per_second > 0)
    {
      s->packet_accumulator += dt * s->rate_packets_per_second;
      n_packets = s->packet_accumulator;

      /* Never allow accumulator to grow if we get behind. */
      s->packet_accumulator -= n_packets;
    }

  /* Apply fixed limit. */
  if (s->n_packets_limit > 0
      && s->n_packets_generated + n_packets > s->n_packets_limit)
    n_packets = s->n_packets_limit - s->n_packets_generated;

  /* Generate up to one frame's worth of packets. */
  if (n_packets > s->n_max_frame)
    n_packets = s->n_max_frame;

  if (n_packets > 0)
    n_packets = pg_generate_packets (node, pg, s, n_packets);

  s->n_packets_generated += n_packets;

  return n_packets;
}

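/* Input node function: walk the streams enabled on this worker and
   generate packets for each. */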
uword
pg_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  uword i;
  pg_main_t *pg = &pg_main;
  uword n_packets = 0;
  u32 worker_index = 0;

  if (vlib_num_workers ())
    worker_index = vlib_get_current_worker_index ();

  /* *INDENT-OFF* */
  clib_bitmap_foreach (i, pg->enabled_streams[worker_index], ({
    pg_stream_t *s = vec_elt_at_index (pg->streams, i);
    n_packets += pg_input_stream (node, pg, s);
  }));
  /* *INDENT-ON* */

  return n_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (pg_input_node) = {
  .function = pg_input,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .name = "pg-input",
  .sibling_of = "device-input",
  .type = VLIB_NODE_TYPE_INPUT,

  .format_trace = format_pg_input_trace,

  /* Input node will be left disabled until a stream is active. */
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */