aboutsummaryrefslogtreecommitdiffstats
path: root/src/plugins/gtpu/gtpu_encap.c
diff options
context:
space:
mode:
authorChristian Hopps <chopps@labn.net>2019-11-03 07:02:15 -0500
committerDamjan Marion <dmarion@me.com>2020-09-07 09:43:27 +0000
commitfb7e7ed2cd10446d5ecd1b1e8df470e706c448ed (patch)
tree2590e834f09c2d936f83d9bc766af99e084a522f /src/plugins/gtpu/gtpu_encap.c
parentdce44e4e2302042890dc8b579d4ff6f99509ae7a (diff)
ipsec: fix padding/alignment for native IPsec encryption
Not all ESP crypto algorithms require padding/alignment to be the same as AES block/IV size. CCM, CTR and GCM all have no padding/alignment requirements, and the RFCs indicate that no padding (beyond ESPs 4 octet alignment requirement) should be used unless TFC (traffic flow confidentiality) has been requested. CTR: https://tools.ietf.org/html/rfc3686#section-3.2 GCM: https://tools.ietf.org/html/rfc4106#section-3.2 CCM: https://tools.ietf.org/html/rfc4309#section-3.2 - VPP is incorrectly using the IV/AES block size to pad CTR and GCM. These modes do not require padding (beyond ESPs 4 octet requirement); as a result packets will have unnecessary padding, which will waste bandwidth at least and possibly fail certain network configurations that have finely tuned MTU configurations at worst. Fix this as well as changing the field names from ".*block_size" to ".*block_align" to better represent their actual (and only) use. Rename "block_sz" in esp_encrypt to "esp_align" and set it correctly as well. test: ipsec: Add unit-test to test for RFC correct padding/alignment test: patch scapy to not incorrectly pad ccm, ctr, gcm modes as well - Scapy is also incorrectly using the AES block size of 16 to pad CCM, CTR, and GCM cipher modes. A bug report has been opened with, and acknowledged by, the upstream scapy project as well: https://github.com/secdev/scapy/issues/2322 Ticket: VPP-1928 Type: fix Signed-off-by: Christian Hopps <chopps@labn.net> Change-Id: Iaa4d6a325a2e99fdcb2c375a3395bcfe7947770e
Diffstat (limited to 'src/plugins/gtpu/gtpu_encap.c')
0 files changed, 0 insertions, 0 deletions
185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/fib/fib_entry_cover.h>
#include <vnet/fib/fib_entry_src.h>
#include <vnet/fib/fib_node_list.h>

u32
fib_entry_cover_track (fib_entry_t* cover,
		       fib_node_index_t covered)
{
    fib_entry_delegate_t *covered_fed;

    FIB_ENTRY_DBG(cover, "cover-track %d", covered);

    /* An entry can never be its own cover */
    ASSERT(fib_entry_get_index(cover) != covered);

    covered_fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);

    /*
     * Lazily create the COVERED delegate and its dependent list the
     * first time something tracks this cover.
     */
    if (NULL == covered_fed)
    {
        covered_fed = fib_entry_delegate_find_or_add(cover,
                                                     FIB_ENTRY_DELEGATE_COVERED);
        covered_fed->fd_list = fib_node_list_create();
    }

    /* The returned list position is the tracker's handle for untrack */
    return (fib_node_list_push_front(covered_fed->fd_list,
                                     0, FIB_NODE_TYPE_ENTRY,
                                     covered));
}

void
fib_entry_cover_untrack (fib_entry_t* cover,
			 u32 tracked_index)
{
    fib_entry_delegate_t *covered_fed;

    FIB_ENTRY_DBG(cover, "cover-untrack @ %d", tracked_index);

    covered_fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);

    if (NULL != covered_fed)
    {
        fib_node_list_remove(covered_fed->fd_list, tracked_index);

        /*
         * Once the last tracker is gone, tear down the list and the
         * delegate so the cover carries no empty state.
         */
        if (0 == fib_node_list_get_size(covered_fed->fd_list))
        {
            fib_node_list_destroy(&covered_fed->fd_list);
            fib_entry_delegate_remove(cover, FIB_ENTRY_DELEGATE_COVERED);
        }
    }
}

/**
 * Internal struct to hold user supplied parameters for the cover walk.
 * NOTE(review): the "enty" typo in the type name is long-standing and
 * preserved here, since the name is referenced by the walk functions below.
 */
typedef struct fib_enty_cover_walk_ctx_t_ {
    fib_entry_t *cover;            /**< the covering entry whose dependents are walked */
    fib_entry_covered_walk_t walk; /**< user callback invoked per covered entry */
    void *ctx;                     /**< opaque user context passed to the callback */
} fib_enty_cover_walk_ctx_t;

/**
 * Adaptor from the node-list walk signature to the user's covered-walk
 * callback. Always continues the walk.
 */
static int
fib_entry_cover_walk_node_ptr (fib_node_ptr_t *depend,
			       void *args)
{
    fib_enty_cover_walk_ctx_t *walk_ctx;

    walk_ctx = args;
    walk_ctx->walk(walk_ctx->cover, depend->fnp_index, walk_ctx->ctx);

    /* continue */
    return (1);
}

void
fib_entry_cover_walk (fib_entry_t *cover,
		      fib_entry_covered_walk_t walk,
		      void *args)
{
    fib_entry_delegate_t *covered_fed;

    covered_fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);

    /* No delegate means nothing is tracking this cover - nothing to walk */
    if (NULL != covered_fed)
    {
        fib_enty_cover_walk_ctx_t walk_ctx = {
            .cover = cover,
            .walk = walk,
            .ctx = args,
        };

        fib_node_list_walk(covered_fed->fd_list,
                           fib_entry_cover_walk_node_ptr,
                           &walk_ctx);
    }
}

/**
 * Return the number of entries currently tracking this cover,
 * or 0 when no COVERED delegate exists.
 */
u32
fib_entry_cover_get_size (fib_entry_t *cover)
{
    fib_entry_delegate_t *covered_fed;

    covered_fed = fib_entry_delegate_get(cover, FIB_ENTRY_DELEGATE_COVERED);

    return ((NULL == covered_fed) ?
            0 :
            fib_node_list_get_size(covered_fed->fd_list));
}

/**
 * Context used to accumulate the formatted list of covered entry indices.
 */
typedef struct fib_entry_cover_list_format_ctx_t_ {
    u8 *s; /**< the string being built; extended via format() per entry */
} fib_entry_cover_list_format_ctx_t;

/**
 * Per-entry formatter: append one covered index to the context's string.
 * Always continues the walk.
 */
static int
fib_entry_covered_list_format_one (fib_entry_t *cover,
				   fib_node_index_t covered,
				   void *args)
{
    fib_entry_cover_list_format_ctx_t *fmt_ctx = args;

    fmt_ctx->s = format(fmt_ctx->s, "%d, ", covered);

    /* continue */
    return (1);
}

/**
 * Append a comma-separated list of the entry's covered dependents to s.
 */
u8*
fib_entry_cover_list_format (fib_entry_t *fib_entry,
			     u8 *s)
{
    fib_entry_cover_list_format_ctx_t fmt_ctx = {
	.s = s,
    };

    fib_entry_cover_walk(fib_entry,
			 fib_entry_covered_list_format_one,
			 &fmt_ctx);

    return (fmt_ctx.s);
}

static int
fib_entry_cover_change_one (fib_entry_t *cover,
			    fib_node_index_t covered,
			    void *args)
{
    fib_node_index_t new_cover;

    /*
     * The 3 entries involved here are:
     *   cover - the least specific. It will cover both the others
     *  new_cover - the entry just inserted below the cover
     *  covered - the entry that was tracking the cover.
     *
     * The checks below are to determine if new_cover is a cover for covered.
     */
    new_cover = pointer_to_uword(args);

    if (FIB_NODE_INDEX_INVALID == new_cover)
    {
	/*
	 * nothing has been inserted, which implies the cover was removed.
	 * 'cover' is thus the new cover.
	 */
	fib_entry_cover_changed(covered);
    }
    else if (new_cover != covered)
    {
	/*
	 * an entry was inserted; notify only if its prefix actually
	 * covers the tracked entry's prefix.
	 */
	fib_prefix_t pfx_covered, pfx_new_cover;

	fib_entry_get_prefix(covered, &pfx_covered);
	fib_entry_get_prefix(new_cover, &pfx_new_cover);

	if (fib_prefix_is_cover(&pfx_new_cover, &pfx_covered))
	{
	    fib_entry_cover_changed(covered);
	}
    }
    /* continue */
    return (1);
}

/**
 * Notify all entries tracking 'cover_index' that a potential new cover
 * ('covered', or FIB_NODE_INDEX_INVALID on removal) has appeared below it.
 */
void
fib_entry_cover_change_notify (fib_node_index_t cover_index,
			       fib_node_index_t covered)
{
    /* smuggle the new entry's index through the walk's void* context */
    fib_entry_cover_walk(fib_entry_get(cover_index),
			 fib_entry_cover_change_one,
			 uword_to_pointer(covered, void*));
}

/**
 * Per-entry update notifier: inform one covered entry that its cover's
 * forwarding was updated. Always continues the walk.
 */
static int
fib_entry_cover_update_one (fib_entry_t *cover,
			    fib_node_index_t covered,
			    void *args)
{
    fib_entry_cover_updated(covered);

    /* continue */
    return (1);
}

/**
 * Notify every entry tracking 'fib_entry' that the cover was updated.
 */
void
fib_entry_cover_update_notify (fib_entry_t *fib_entry)
{
    fib_entry_cover_walk(fib_entry, fib_entry_cover_update_one, NULL);
}