path: root/src/plugins/ioam/lib-pot/math64.h
/*
 * math64.h provides 64-bit unsigned integer add and multiply, each followed
 * by a modulo operation.  linux/math64.h provides divide and multiply for
 * 64-bit integers, but:
 * 1. multiply: mul_u64_u64_shr only returns 64 bits of the result and has to
 *    be called twice to obtain the complete 128-bit product.
 * 2. a modulo operation on the result of adding or multiplying u64 values,
 *    which may exceed 64 bits, is not supported.
 * Hence this header combines add/multiply with a subsequent modulo of u64
 * integers, always producing a u64 result.
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef include_vnet_math64_h
#define include_vnet_math64_h
#include <stdint.h>
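
/*
 * Note: the u64/u32 types used below are the VPP (vppinfra) fixed-width
 * typedefs; this header assumes they are already defined by the including
 * translation unit.
 */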

/*
 * multiplies two u64 values and returns the 128-bit product in *hi and *lo
 */
static inline void mul64by64(u64 a, u64 b, u64 * hi, u64 * lo)
{
    u64 a_lo = (u64) (u32) a;
    u64 a_hi = a >> 32;
    u64 b_lo = (u64) (u32) b;
    u64 b_hi = b >> 32;

    u64 p0 = a_lo * b_lo;
    u64 p1 = a_lo * b_hi;
    u64 p2 = a_hi * b_lo;
    u64 p3 = a_hi * b_hi;

    u32 cy = (u32) (((p0 >> 32) + (u32) p1 + (u32) p2) >> 32);

    *lo = p0 + (p1 << 32) + (p2 << 32);
    *hi = p3 + (p1 >> 32) + (p2 >> 32) + cy;
    return;
}
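
/*
 * A minimal sanity-check sketch, assuming a compiler that provides unsigned
 * __int128 (gcc/clang): mul64by64 can be cross-checked against the native
 * 128-bit product.  mul64by64_check is a hypothetical helper added only for
 * illustration.
 */
#ifdef __SIZEOF_INT128__
static inline int mul64by64_check(u64 a, u64 b)
{
    u64 hi, lo;
    unsigned __int128 ref = (unsigned __int128) a * b;

    mul64by64(a, b, &hi, &lo);
    /* nonzero when the split product matches the native reference */
    return (hi == (u64) (ref >> 64)) && (lo == (u64) ref);
}
#endif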

#define TWO64 18446744073709551616.0

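/*
 * mod128by64 reduces the 128-bit value (x:y), i.e. 2^64 * x + y, modulo m.
 * di is the caller-supplied double-precision reciprocal of m (1.0 / m).
 * The quotient is first estimated in floating point (53 bits of precision),
 * the estimate is multiplied back by m, and the remainder is then adjusted
 * by comparing that product against the dividend until it lies in [0, m).
 */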
static inline u64 mod128by64(u64 x, u64 y, u64 m, double di)
{
  u64 q1, q2;
  u64 p1, p0;
  double dq;

  /* estimate the quotient: first pass, 53 bits of double precision */
  dq = (TWO64 * (double) x + (double) y) * di;

  if (dq >= TWO64)
    q1 = 0xfffffffffffff800UL;
  else
    q1 = dq;

  /* compute q1 * m so the product can be compared with the dividend */
  mul64by64 (q1, m, &p1, &p0);

  /* adjust the quotient: is the estimate greater than the actual quotient? */
  if (x < p1 || (x == p1 && y < p0))
    {
      /* q1 > quotient: calculate the absolute remainder */
      x = p1 - (x + (p0 < y));
      y = p0 - y;

      /* use the remainder as a new dividend to adjust the quotient */
      q2 = (u64) ((TWO64 * (double) x + (double) y) * di);
      mul64by64 (q2, m, &p1, &p0);

      if (x < p1 || (x == p1 && y <= p0))
        {
          y = p0 - y;
        }
      else
        {
          y = p0 - y;
          y += m;
        }
    }
  else
    {
      x = x - (p1 + (y < p0));
      y = y - p0;

      q2 = (u64) ((TWO64 * (double) x + (double) y) * di);
      mul64by64 (q2, m, &p1, &p0);

      if (x < p1 || (x == p1 && y < p0))
        {
          y = y - p0;
          y += m;
        }
      else
        {
          y = y - p0;
          if (y >= m)
            {
              y -= m;
            }
        }
    }

  return y;
}

/* 
 * returns a % p; primeinv is the precomputed double reciprocal of p
 */
static inline u64 mod64by64(u64 a, u64 p, double primeinv)
{
    return (mod128by64(0, a, p, primeinv));
}

static inline void add64(u64 a, u64 b, u64 * whi, u64 * wlo)
{
    *wlo = a + b;
    /* set the carry explicitly so callers need not pre-initialize *whi */
    *whi = (*wlo < a) ? 1 : 0;
}

/* 
 * returns (a + b) % p; pi is the precomputed double reciprocal of p
 */
static inline u64 add64_mod(u64 a, u64 b, u64 p, double pi)
{
    u64 shi = 0, slo = 0;

    add64(a, b, &shi, &slo);
    return (mod128by64(shi, slo, p, pi));
}

/* 
 * returns (a * b) % p; pi is the precomputed double reciprocal of p
 */
static inline u64 mul64_mod(u64 a, u64 b, u64 p, double pi)
{
    u64 phi = 0, plo = 0;

    mul64by64(a, b, &phi, &plo);
    return (mod128by64(phi, plo, p, pi));
}
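
/*
 * Usage sketch (illustrative; the helper name below is hypothetical): the
 * caller computes the double reciprocal of the modulus once and reuses it
 * for every add64_mod/mul64_mod call, e.g. to evaluate a*x + b (mod p).
 */
static inline u64 example_axb_mod(u64 a, u64 x, u64 b, u64 p)
{
    double pi = 1.0 / (double) p;       /* precomputed reciprocal of p */
    u64 ax = mul64_mod(a, x, p, pi);    /* (a * x) % p */

    return add64_mod(ax, b, p, pi);     /* (a * x + b) % p */
}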

#endif