from os import remove
import socket
import unittest

from util import ppp
from framework import VppTestRunner
from template_ipsec import SpdFlowCacheTemplate
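# SpdFlowCacheTemplate (from template_ipsec) provides, directly or via its
# base classes, the helpers used throughout these tests: create_interfaces(),
# spd_create_and_intf_add(), spd_add_rem_policy(), create_stream(),
# verify_capture(), verify_policy_match() and
# verify_num_inbound_flow_cache_entries().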


class SpdFlowCacheInbound(SpdFlowCacheTemplate):
    # Override setUpConstants to enable inbound flow cache in config
    @classmethod
    def setUpConstants(cls):
        super(SpdFlowCacheInbound, cls).setUpConstants()
        cls.vpp_cmdline.extend(["ipsec", "{", "ipv4-inbound-spd-flow-cache on", "}"])
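        # These tokens are appended to the VPP command line, roughly the
        # equivalent of a startup.conf fragment of:
        #
        #   ipsec {
        #       ipv4-inbound-spd-flow-cache on
        #   }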
        cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))


class IPSec4SpdTestCaseBypass(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (add bypass)"""

    def test_ipsec_spd_inbound_bypass(self):
        # In this test case, packets in IPv4 FWD path are configured
        # to go through IPSec inbound SPD policy lookup.
        #
        # 2 inbound SPD rules (1 HIGH and 1 LOW) are added.
        # - High priority rule action is set to DISCARD.
        # - Low priority rule action is set to BYPASS.
        #
        # Since BYPASS rules take precedence over DISCARD
        # (the order being PROTECT, BYPASS, DISCARD) we expect the
        # BYPASS rule to match and traffic to be correctly forwarded.
        self.create_interfaces(2)
        pkt_count = 5

        self.spd_create_and_intf_add(1, [self.pg1, self.pg0])

        # create input rules
        # bypass rule should take precedence over discard rule,
        # even though it's lower priority
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        policy_1 = self.spd_add_rem_policy(  # inbound, priority 15
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=15,
            policy_type="discard",
        )

        # create output rule so we can capture forwarded packets
        policy_2 = self.spd_add_rem_policy(  # outbound, priority 10
            1,
            self.pg0,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
        )

        # check flow cache is empty before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)
        # create the packet stream
        packets = self.create_stream(self.pg0, self.pg1, pkt_count)
        # add the stream to the source interface
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()
        self.pg_start()

        # check capture on pg1
        capture = self.pg1.get_capture()
        for packet in capture:
            try:
                self.logger.debug(ppp("SPD Add - Got packet:", packet))
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise
        self.logger.debug("SPD: Num packets: %s", len(capture.res))

        # verify captured packets
        self.verify_capture(self.pg0, self.pg1, capture)
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(0, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # check input policy has been cached
        self.verify_num_inbound_flow_cache_entries(1)


class IPSec4SpdTestCaseDiscard(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (add discard)"""

    def test_ipsec_spd_inbound_discard(self):
        # In this test case, packets in IPv4 FWD path are configured
        # to go through IPSec inbound SPD policy lookup.
        # 1 DISCARD rule is added, so all traffic should be dropped.
        self.create_interfaces(2)
        pkt_count = 5

        self.spd_create_and_intf_add(1, [self.pg1, self.pg0])

        # create input rule
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="discard",
        )

        # create output rule so we can capture forwarded packets
        policy_1 = self.spd_add_rem_policy(  # outbound, priority 10
            1,
            self.pg0,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
        )

        # check flow cache is empty before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)
        # create the packet stream
        packets = self.create_stream(self.pg0, self.pg1, pkt_count)
        # add the stream to the source interface
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()
        self.pg_start()
        # inbound discard rule should have dropped traffic
        self.pg1.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(0, policy_1)
        # only inbound discard rule should have been cached
        self.verify_num_inbound_flow_cache_entries(1)


class IPSec4SpdTestCaseRemoveInbound(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (remove bypass)"""

    def test_ipsec_spd_inbound_remove(self):
        # In this test case, packets in IPv4 FWD path are configured
        # to go through IPSec inbound SPD policy lookup.
        #
        # 2 inbound SPD rules (1 HIGH and 1 LOW) are added.
        # - High priority rule action is set to DISCARD.
        # - Low priority rule action is set to BYPASS.
        #
        # Since BYPASS rules take precedence over DISCARD
        # (the order being PROTECT, BYPASS, DISCARD) we expect the
        # BYPASS rule to match and traffic to be correctly forwarded.
        #
        # The BYPASS rule is then removed, and we check that all traffic
        # is now correctly dropped.
        self.create_interfaces(2)
        pkt_count = 5

        self.spd_create_and_intf_add(1, [self.pg1, self.pg0])

        # create input rules
        # bypass rule should take precedence over discard rule,
        # even though it's lower priority
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        policy_1 = self.spd_add_rem_policy(  # inbound, priority 15
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=15,
            policy_type="discard",
        )

        # create output rule so we can capture forwarded packets
        policy_2 = self.spd_add_rem_policy(  # outbound, priority 10
            1,
            self.pg0,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
        )

        # check flow cache is empty before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)
        # create the packet stream
        packets = self.create_stream(self.pg0, self.pg1, pkt_count)
        # add the stream to the source interface
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()
        self.pg_start()

        # check capture on pg1
        capture = self.pg1.get_capture()
        for packet in capture:
            try:
                self.logger.debug(ppp("SPD Add - Got packet:", packet))
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise
        self.logger.debug("SPD: Num packets: %s", len(capture.res))

        # verify captured packets
        self.verify_capture(self.pg0, self.pg1, capture)
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(0, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # check input policy has been cached
        self.verify_num_inbound_flow_cache_entries(1)

        # remove the input bypass rule
        self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
            remove=True,
        )
        # verify flow cache counter has been reset by rule removal
        self.verify_num_inbound_flow_cache_entries(0)

        # resend the same packets
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()  # flush the old capture
        self.pg_start()

        # inbound discard rule should have dropped traffic
        self.pg1.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # by removing the bypass rule, we should have reset the flow cache
        # we only expect the discard rule to now be in the flow cache
        self.verify_num_inbound_flow_cache_entries(1)


class IPSec4SpdTestCaseReaddInbound(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (add, remove, re-add bypass)"""

    def test_ipsec_spd_inbound_readd(self):
        # In this test case, packets in IPv4 FWD path are configured
        # to go through IPSec inbound SPD policy lookup.
        #
        # 2 inbound SPD rules (1 HIGH and 1 LOW) are added.
        # - High priority rule action is set to DISCARD.
        # - Low priority rule action is set to BYPASS.
        #
        # Since BYPASS rules take precedence over DISCARD
        # (the order being PROTECT, BYPASS, DISCARD) we expect the
        # BYPASS rule to match and traffic to be correctly forwarded.
        #
        # The BYPASS rule is then removed, and we check that all traffic
        # is now correctly dropped.
        #
        # The BYPASS rule is then readded, and we check that traffic is
        # forwarded correctly again.
        self.create_interfaces(2)
        pkt_count = 5

        self.spd_create_and_intf_add(1, [self.pg1, self.pg0])

        # create input rules
        # bypass rule should take precedence over discard rule,
        # even though it's lower priority
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        policy_1 = self.spd_add_rem_policy(  # inbound, priority 15
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=15,
            policy_type="discard",
        )

        # create output rule so we can capture forwarded packets
        policy_2 = self.spd_add_rem_policy(  # outbound, priority 10
            1,
            self.pg0,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
        )

        # check flow cache is empty before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)
        # create the packet stream
        packets = self.create_stream(self.pg0, self.pg1, pkt_count)
        # add the stream to the source interface
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()
        self.pg_start()

        # check capture on pg1
        capture = self.pg1.get_capture()
        for packet in capture:
            try:
                self.logger.debug(ppp("SPD Add - Got packet:", packet))
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise
        self.logger.debug("SPD: Num packets: %s", len(capture.res))

        # verify captured packets
        self.verify_capture(self.pg0, self.pg1, capture)
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(0, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # check input policy has been cached
        self.verify_num_inbound_flow_cache_entries(1)

        # remove the input bypass rule
        self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
            remove=True,
        )
        # verify flow cache counter has been reset by rule removal
        self.verify_num_inbound_flow_cache_entries(0)

        # resend the same packets
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()  # flush the old capture
        self.pg_start()

        # inbound discard rule should have dropped traffic
        self.pg1.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # by removing the bypass rule, flow cache was reset
        # we only expect the discard rule to now be in the flow cache
        self.verify_num_inbound_flow_cache_entries(1)

        # readd the input bypass rule
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # verify flow cache counter has been reset by rule addition
        self.verify_num_inbound_flow_cache_entries(0)

        # resend the same packets
        self.pg0.add_stream(packets)
        self.pg1.enable_capture()  # flush the old capture
        self.pg_start()

        # check capture on pg1
        capture = self.pg1.get_capture()
        for packet in capture:
            try:
                self.logger.debug(ppp("SPD Add - Got packet:", packet))
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:", packet))
                raise

        # verify captured packets
        self.verify_capture(self.pg0, self.pg1, capture)
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count * 2, policy_2)
        # by readding the bypass rule, we reset the flow cache
        # we only expect the bypass rule to now be in the flow cache
        self.verify_num_inbound_flow_cache_entries(1)


class IPSec4SpdTestCaseMultipleInbound(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (multiple interfaces, multiple rules)"""

    def test_ipsec_spd_inbound_multiple(self):
        # In this test case, packets in IPv4 FWD path are configured to go
        # through IPSec inbound SPD policy lookup.
        #
        # Multiple rules on multiple interfaces are tested at the same time.
        # 3x interfaces are configured, binding the same SPD to each.
        # Each interface has 1 SPD rule (2x BYPASS and 1x DISCARD in total).
        #
        # Traffic should be forwarded with destinations pg1 & pg2
        # and dropped to pg0.
        self.create_interfaces(3)
        pkt_count = 5
        # bind SPD to all interfaces
        self.spd_create_and_intf_add(1, self.pg_interfaces)
        # add input rules on all interfaces
        # pg0 -> pg1
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # pg1 -> pg2
        policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg2,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # pg2 -> pg0
        policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg0,
            self.pg2,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="discard",
        )

        # create output rules covering the full IP range
        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
        policy_3 = self.spd_add_rem_policy(  # outbound, priority 10
            1,
            self.pg0,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
            all_ips=True,
        )

        # check flow cache is empty (0 active elements) before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)

        # create the packet streams
        packets0 = self.create_stream(self.pg0, self.pg1, pkt_count)
        packets1 = self.create_stream(self.pg1, self.pg2, pkt_count)
        packets2 = self.create_stream(self.pg2, self.pg0, pkt_count)
        # add the streams to the source interfaces
        self.pg0.add_stream(packets0)
        self.pg1.add_stream(packets1)
        self.pg2.add_stream(packets2)
        # enable capture on all interfaces
        for pg in self.pg_interfaces:
            pg.enable_capture()
        # start the packet generator
        self.pg_start()

        # get captures from ifs
        if_caps = []
        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
            if_caps.append(pg.get_capture())
            for packet in if_caps[-1]:
                try:
                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
                except Exception:
                    self.logger.error(ppp("Unexpected or invalid packet:", packet))
                    raise

        # verify captures that matched BYPASS rules
        self.verify_capture(self.pg0, self.pg1, if_caps[0])
        self.verify_capture(self.pg1, self.pg2, if_caps[1])
        # verify that traffic to pg0 matched DISCARD rule and was dropped
        self.pg0.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # check flow/policy match was cached for: 3x input policies
        self.verify_num_inbound_flow_cache_entries(3)


class IPSec4SpdTestCaseOverwriteStaleInbound(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (overwrite stale entries)"""

    def test_ipsec_spd_inbound_overwrite(self):
        # The operation of the flow cache is setup so that the entire cache
        # is invalidated when adding or removing an SPD policy rule.
        # For performance, old cache entries are not zero'd, but remain
        # in the table as "stale" entries. If a flow matches a stale entry,
        # and the epoch count does NOT match the current count, the entry
        # is overwritten.
        # In this test, 3 active rules are created and matched to enter
        # them into the flow cache.
        # A single entry is removed to invalidate the entire cache.
        # We then readd the rule and test that overwriting of the previous
        # stale entries occurs as expected, and that the flow cache entry
        # counter is updated correctly.
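        #
        # A rough sketch of the cache lookup rule described above (an
        # illustration only, not the actual VPP data-plane code):
        #
        #   entry = cache[hash(flow) & (n_buckets - 1)]
        #   if entry matches the flow and entry.epoch == current_epoch:
        #       reuse the cached policy decision      # fresh entry: cache hit
        #   else:
        #       do a full SPD lookup and overwrite the entry (stale/collision)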
        self.create_interfaces(3)
        pkt_count = 5
        # bind SPD to all interfaces
        self.spd_create_and_intf_add(1, self.pg_interfaces)
        # add input rules on all interfaces
        # pg0 -> pg1
        policy_0 = self.spd_add_rem_policy(  # inbound
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # pg1 -> pg2
        policy_1 = self.spd_add_rem_policy(  # inbound
            1,
            self.pg2,
            self.pg1,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # pg2 -> pg0
        policy_2 = self.spd_add_rem_policy(  # inbound
            1,
            self.pg0,
            self.pg2,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="discard",
        )

        # create output rules covering the full IP range
        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
        policy_3 = self.spd_add_rem_policy(  # outbound
            1,
            self.pg0,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
            all_ips=True,
        )

        # check flow cache is empty (0 active elements) before sending traffic
        self.verify_num_inbound_flow_cache_entries(0)

        # create the packet streams
        packets0 = self.create_stream(self.pg0, self.pg1, pkt_count)
        packets1 = self.create_stream(self.pg1, self.pg2, pkt_count)
        packets2 = self.create_stream(self.pg2, self.pg0, pkt_count)
        # add the streams to the source interfaces
        self.pg0.add_stream(packets0)
        self.pg1.add_stream(packets1)
        self.pg2.add_stream(packets2)
        # enable capture on all interfaces
        for pg in self.pg_interfaces:
            pg.enable_capture()
        # start the packet generator
        self.pg_start()

        # get captures from ifs
        if_caps = []
        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
            if_caps.append(pg.get_capture())
            for packet in if_caps[-1]:
                try:
                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
                except Exception:
                    self.logger.error(ppp("Unexpected or invalid packet:", packet))
                    raise

        # verify captures that matched BYPASS rules
        self.verify_capture(self.pg0, self.pg1, if_caps[0])
        self.verify_capture(self.pg1, self.pg2, if_caps[1])
        # verify that traffic to pg0 matched DISCARD rule and was dropped
        self.pg0.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        # check flow/policy match was cached for: 3x input policies
        self.verify_num_inbound_flow_cache_entries(3)

        # adding an outbound policy should not invalidate the inbound flow cache
        self.spd_add_rem_policy(  # outbound
            1,
            self.pg0,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=1,
            policy_type="bypass",
            all_ips=True,
        )
        # check inbound flow cache counter has not been reset
        self.verify_num_inbound_flow_cache_entries(3)

        # remove + readd bypass policy - flow cache counter will be reset,
        # and there will be 3x stale entries in flow cache
        self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
            remove=True,
        )
        # readd policy
        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
            1,
            self.pg1,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=0,
            priority=10,
            policy_type="bypass",
        )
        # check counter was reset
        self.verify_num_inbound_flow_cache_entries(0)

        # resend the same packets
        self.pg0.add_stream(packets0)
        self.pg1.add_stream(packets1)
        self.pg2.add_stream(packets2)
        for pg in self.pg_interfaces:
            pg.enable_capture()  # flush previous captures
        self.pg_start()

        # get captures from ifs
        if_caps = []
        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
            if_caps.append(pg.get_capture())
            for packet in if_caps[-1]:
                try:
                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
                except Exception:
                    self.logger.error(ppp("Unexpected or invalid packet:", packet))
                    raise

        # verify captures that matched BYPASS rules
        self.verify_capture(self.pg0, self.pg1, if_caps[0])
        self.verify_capture(self.pg1, self.pg2, if_caps[1])
        # verify that traffic to pg0 matched DISCARD rule and was dropped
        self.pg0.assert_nothing_captured()
        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_0)
        self.verify_policy_match(pkt_count * 2, policy_1)
        self.verify_policy_match(pkt_count * 2, policy_2)
        # we are overwriting 3x stale entries - check flow cache counter
        # is correct
        self.verify_num_inbound_flow_cache_entries(3)


class IPSec4SpdTestCaseCollisionInbound(SpdFlowCacheInbound):
    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
        (hash collision)"""

    # Override class setup to restrict hash table size to 16 buckets.
    # This forces using only the lower 4 bits of the hash as a key,
    # making hash collisions easy to find.
    @classmethod
    def setUpConstants(cls):
        super(SpdFlowCacheInbound, cls).setUpConstants()
        cls.vpp_cmdline.extend(
            [
                "ipsec",
                "{",
                "ipv4-inbound-spd-flow-cache on",
                "ipv4-inbound-spd-hash-buckets 16",
                "}",
            ]
        )
        cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
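        # The resulting VPP config is roughly equivalent to a startup.conf
        # fragment of:
        #
        #   ipsec {
        #       ipv4-inbound-spd-flow-cache on
        #       ipv4-inbound-spd-hash-buckets 16
        #   }
        #
        # With only 16 buckets, just the lower 4 bits of the flow hash pick a
        # bucket, which is what makes collisions easy to construct below.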

    def test_ipsec_spd_inbound_collision(self):
        # The flow cache operation is setup to overwrite an entry
        # if a hash collision occurs.
        # In this test, 2 packets are configured that result in a
        # hash with the same lower 4 bits.
        # After the first packet is received, there should be one
        # active entry in the flow cache.
        # After the second packet with the same lower 4 bit hash
        # is received, this should overwrite the same entry.
        # Therefore there will still be a total of one (1) entry
        # in the flow cache, with two matching policies.
        # The crc32_supported() method is used to check the CPU for crc32
        # intrinsic support for hashing.
        # If crc32 is not supported, we fall back to clib_xxhash()
        self.create_interfaces(4)
        pkt_count = 5
        # bind SPD to all interfaces
        self.spd_create_and_intf_add(1, self.pg_interfaces)

        # create output rules covering the full IP range
        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
        policy_0 = self.spd_add_rem_policy(  # outbound
            1,
            self.pg0,
            self.pg0,
            socket.IPPROTO_UDP,
            is_out=1,
            priority=10,
            policy_type="bypass",
            all_ips=True,
        )

        capture_intfs = []
        if self.crc32_supported():  # create crc32 collision on last 4 bits
            hashed_with_crc32 = True
            # add matching rules
            policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
                1,
                self.pg1,
                self.pg2,
                socket.IPPROTO_UDP,
                is_out=0,
                priority=10,
                policy_type="bypass",
            )
            policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
                1,
                self.pg3,
                self.pg0,
                socket.IPPROTO_UDP,
                is_out=0,
                priority=10,
                policy_type="bypass",
            )

            # we expect to get captures on pg1 + pg3
            capture_intfs.append(self.pg1)
            capture_intfs.append(self.pg3)

            # check flow cache is empty before sending traffic
            self.verify_num_inbound_flow_cache_entries(0)

            # create the packet streams
            # packet hashes to:
            # ad727628
            packets1 = self.create_stream(self.pg2, self.pg1, pkt_count, 1, 1)
            # b5512898
            packets2 = self.create_stream(self.pg0, self.pg3, pkt_count, 1, 1)
            # add the streams to the source interfaces
            self.pg2.add_stream(packets1)
            self.pg0.add_stream(packets2)
        else:  # create xxhash collision on last 4 bits
            hashed_with_crc32 = False
            # add matching rules
            policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
                1,
                self.pg1,
                self.pg2,
                socket.IPPROTO_UDP,
                is_out=0,
                priority=10,
                policy_type="bypass",
            )
            policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
                1,
                self.pg2,
                self.pg3,
                socket.IPPROTO_UDP,
                is_out=0,
                priority=10,
                policy_type="bypass",
            )

            capture_intfs.append(self.pg1)
            capture_intfs.append(self.pg2)

            # check flow cache is empty before sending traffic
            self.verify_num_inbound_flow_cache_entries(0)

            # create the packet streams
            # 2f8f90f557eef12c
            packets1 = self.create_stream(self.pg2, self.pg1, pkt_count, 1, 1)
            # 6b7f9987719ffc1c
            packets2 = self.create_stream(self.pg3, self.pg2, pkt_count, 1, 1)
            # add the streams to the source interfaces
            self.pg2.add_stream(packets1)
            self.pg3.add_stream(packets2)

        # enable capture on interfaces we expect capture on & send pkts
        for pg in capture_intfs:
            pg.enable_capture()
        self.pg_start()

        # get captures
        if_caps = []
        for pg in capture_intfs:
            if_caps.append(pg.get_capture())
            for packet in if_caps[-1]:
                try:
                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
                except Exception:
                    self.logger.error(ppp("Unexpected or invalid packet:", packet))
                    raise

        # verify captures that matched BYPASS rule
        if hashed_with_crc32:
            self.verify_capture(self.pg2, self.pg1, if_caps[0])
            self.verify_capture(self.pg0, self.pg3, if_caps[1])
        else:  # hashed with xxhash
            self.verify_capture(self.pg2, self.pg1, if_caps[0])
            self.verify_capture(self.pg3, self.pg2, if_caps[1])

        # verify all policies matched the expected number of times
        self.verify_policy_match(pkt_count, policy_1)
        self.verify_policy_match(pkt_count, policy_2)
        self.verify_policy_match(pkt_count * 2, policy_0)  # output policy
        # we have matched 2 policies, but due to the hash collision
        # one active entry is expected
        self.verify_num_inbound_flow_cache_entries(1)


if __name__ == "__main__":
    unittest.main(testRunner=VppTestRunner)