/**
 * @file ChunkBuffer2.h
 * @author Ambroz Bizjak <ambrop7@gmail.com>
 *
 * @section LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * @section DESCRIPTION
 *
 * Circular packet buffer. Packets are stored in fixed-size blocks; each packet
 * occupies one header block holding its length, followed by enough blocks to
 * hold its data.
 */

#ifndef BADVPN_STRUCTURE_CHUNKBUFFER2_H
#define BADVPN_STRUCTURE_CHUNKBUFFER2_H

#include <stdint.h>
#include <stdlib.h>
#include <limits.h>

#include <misc/balign.h>
#include <misc/debug.h>

#ifndef NDEBUG
#define CHUNKBUFFER2_ASSERT_BUFFER(_buf) _ChunkBuffer2_assert_buffer(_buf);
#define CHUNKBUFFER2_ASSERT_IO(_buf) _ChunkBuffer2_assert_io(_buf);
#else
#define CHUNKBUFFER2_ASSERT_BUFFER(_buf)
#define CHUNKBUFFER2_ASSERT_IO(_buf)
#endif

struct ChunkBuffer2_block {
    int len;
};

typedef struct {
    struct ChunkBuffer2_block *buffer; // block array backing the buffer
    int size;                          // total number of blocks
    int wrap;                          // block index at which the used region wraps around to 0
    int start;                         // block index of the first (oldest) packet's header
    int used;                          // number of blocks currently occupied
    int mtu;                           // maximum packet size, converted to blocks
    uint8_t *input_dest;               // where the next packet should be written, or NULL if no space
    int input_avail;                   // bytes writable at input_dest, or -1 if no space
    uint8_t *output_dest;              // data of the first packet, or NULL if the buffer is empty
    int output_avail;                  // length of the first packet, or -1 if the buffer is empty
} ChunkBuffer2;

// Calculates the number of blocks needed for a buffer that can hold at least 'num'
// packets of up to 'chunk_len' bytes each. Returns -1 if the result would overflow.
static int ChunkBuffer2_calc_blocks (int chunk_len, int num);

// Initializes the buffer to use the 'blocks'-long block array 'buffer', with maximum
// packet size 'mtu' bytes.
static void ChunkBuffer2_Init (ChunkBuffer2 *buf, struct ChunkBuffer2_block *buffer, int blocks, int mtu);

// Submits a packet of 'len' bytes that has been written to 'input_dest'.
static void ChunkBuffer2_SubmitPacket (ChunkBuffer2 *buf, int len);

// Removes the first (oldest) packet from the buffer.
static void ChunkBuffer2_ConsumePacket (ChunkBuffer2 *buf);

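/*
 * Usage sketch (not part of the original API documentation; the sizing choices and
 * the produce_data()/handle_packet() callbacks below are hypothetical):
 *
 *   int blocks = ChunkBuffer2_calc_blocks(mtu, num_packets);
 *   ASSERT(blocks > 0)
 *   struct ChunkBuffer2_block *mem = (struct ChunkBuffer2_block *)malloc(blocks * sizeof(mem[0]));
 *   ASSERT(mem)
 *
 *   ChunkBuffer2 buf;
 *   ChunkBuffer2_Init(&buf, mem, blocks, mtu);
 *
 *   // Producer: write at most buf.input_avail bytes to buf.input_dest, then submit.
 *   if (buf.input_dest) {
 *       int len = produce_data(buf.input_dest, buf.input_avail); // hypothetical source
 *       ChunkBuffer2_SubmitPacket(&buf, len);
 *   }
 *
 *   // Consumer: read the oldest packet from buf.output_dest, then consume it.
 *   if (buf.output_dest) {
 *       handle_packet(buf.output_dest, buf.output_avail); // hypothetical sink
 *       ChunkBuffer2_ConsumePacket(&buf);
 *   }
 *
 *   free(mem);
 */
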
// Returns the block index just past the used region, i.e. where the next
// packet's header block would be written.
static int _ChunkBuffer2_end (ChunkBuffer2 *buf)
{
    if (buf->used >= buf->wrap - buf->start) {
        return (buf->used - (buf->wrap - buf->start));
    } else {
        return (buf->start + buf->used);
    }
}

#ifndef NDEBUG

static void _ChunkBuffer2_assert_buffer (ChunkBuffer2 *buf)
{
    ASSERT(buf->size > 0)
    ASSERT(buf->wrap > 0)
    ASSERT(buf->wrap <= buf->size)
    ASSERT(buf->start >= 0)
    ASSERT(buf->start < buf->wrap)
    ASSERT(buf->used >= 0)
    ASSERT(buf->used <= buf->wrap)
    ASSERT(buf->wrap == buf->size || buf->used >= buf->wrap - buf->start)
    ASSERT(buf->mtu >= 0)
}

static void _ChunkBuffer2_assert_io (ChunkBuffer2 *buf)
{
    // check input

    int end = _ChunkBuffer2_end(buf);

    if (buf->size - end - 1 < buf->mtu) {
        // it will never be possible to write an MTU-long packet here
        ASSERT(!buf->input_dest)
        ASSERT(buf->input_avail == -1)
    } else {
        // calculate number of free blocks
        int free;
        if (buf->used >= buf->wrap - buf->start) {
            free = buf->start - end;
        } else {
            free = buf->size - end;
        }

        if (free > 0) {
            // got space at least for a header. More space will become available as packets are
            // read from the buffer, up to MTU.
            ASSERT(buf->input_dest == (uint8_t *)&buf->buffer[end + 1])
            ASSERT(buf->input_avail == (free - 1) * sizeof(struct ChunkBuffer2_block))
        } else {
            // no space
            ASSERT(!buf->input_dest)
            ASSERT(buf->input_avail == -1)
        }
    }

    // check output

    if (buf->used > 0) {
        int datalen = buf->buffer[buf->start].len;
        ASSERT(datalen >= 0)
        int blocklen = bdivide_up(datalen, sizeof(struct ChunkBuffer2_block));
        ASSERT(blocklen <= buf->used - 1)
        ASSERT(blocklen <= buf->wrap - buf->start - 1)
        ASSERT(buf->output_dest == (uint8_t *)&buf->buffer[buf->start + 1])
        ASSERT(buf->output_avail == datalen)
    } else {
        ASSERT(!buf->output_dest)
        ASSERT(buf->output_avail == -1)
    }
}

#endif

static void _ChunkBuffer2_update_input (ChunkBuffer2 *buf)
{
    int end = _ChunkBuffer2_end(buf);

    if (buf->size - end - 1 < buf->mtu) {
        // it will never be possible to write an MTU-long packet here
        buf->input_dest = NULL;
        buf->input_avail = -1;
        return;
    }

    // calculate number of free blocks
    int free;
    if (buf->used >= buf->wrap - buf->start) {
        free = buf->start - end;
    } else {
        free = buf->size - end;
    }

    if (free > 0) {
        // got space at least for a header. More space will become available as packets are
        // read from the buffer, up to MTU.
        buf->input_dest = (uint8_t *)&buf->buffer[end + 1];
        buf->input_avail = (free - 1) * sizeof(struct ChunkBuffer2_block);
    } else {
        // no space
        buf->input_dest = NULL;
        buf->input_avail = -1;
    }
}

static void _ChunkBuffer2_update_output (ChunkBuffer2 *buf)
{
    if (buf->used > 0) {
        int datalen = buf->buffer[buf->start].len;
        ASSERT(datalen >= 0)
#ifndef NDEBUG
        int blocklen = bdivide_up(datalen, sizeof(struct ChunkBuffer2_block));
        ASSERT(blocklen <= buf->used - 1)
        ASSERT(blocklen <= buf->wrap - buf->start - 1)
#endif
        buf->output_dest = (uint8_t *)&buf->buffer[buf->start + 1];
        buf->output_avail = datalen;
    } else {
        buf->output_dest = NULL;
        buf->output_avail = -1;
    }
}

int ChunkBuffer2_calc_blocks (int chunk_len, int num)
{
    int chunk_data_blocks = bdivide_up(chunk_len, sizeof(struct ChunkBuffer2_block));

    if (chunk_data_blocks > INT_MAX - 1) {
        return -1;
    }
    int chunk_blocks = 1 + chunk_data_blocks;

    if (num > INT_MAX - 1) {
        return -1;
    }
    int num_chunks = num + 1;

    if (chunk_blocks > INT_MAX / num_chunks) {
        return -1;
    }
    int blocks = chunk_blocks * num_chunks;

    return blocks;
}
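
/*
 * Worked example (assuming sizeof(struct ChunkBuffer2_block) == 4, i.e. a platform
 * with 4-byte int -- an assumption, not guaranteed by this header):
 * ChunkBuffer2_calc_blocks(10, 5) gives chunk_data_blocks = ceil(10/4) = 3,
 * chunk_blocks = 1 + 3 = 4, num_chunks = 5 + 1 = 6, so the result is 4 * 6 = 24 blocks.
 */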

void ChunkBuffer2_Init (ChunkBuffer2 *buf, struct ChunkBuffer2_block *buffer, int blocks, int mtu)
{
    ASSERT(blocks > 0)
    ASSERT(mtu >= 0)

    buf->buffer = buffer;
    buf->size = blocks;
    buf->wrap = blocks;
    buf->start = 0;
    buf->used = 0;
    buf->mtu = bdivide_up(mtu, sizeof(struct ChunkBuffer2_block));

    CHUNKBUFFER2_ASSERT_BUFFER(buf)

    _ChunkBuffer2_update_input(buf);
    _ChunkBuffer2_update_output(buf);

    CHUNKBUFFER2_ASSERT_IO(buf)
}

void ChunkBuffer2_SubmitPacket (ChunkBuffer2 *buf, int len)
{
    ASSERT(buf->input_dest)
    ASSERT(len >= 0)
    ASSERT(len <= buf->input_avail)

    CHUNKBUFFER2_ASSERT_BUFFER(buf)
    CHUNKBUFFER2_ASSERT_IO(buf)

    int end = _ChunkBuffer2_end(buf);
    int blocklen = bdivide_up(len, sizeof(struct ChunkBuffer2_block));

    ASSERT(blocklen <= buf->size - end - 1)
    ASSERT(buf->used < buf->wrap - buf->start || blocklen <= buf->start - end - 1)

    buf->buffer[end].len = len;
    buf->used += 1 + blocklen;

    if (buf->used <= buf->wrap - buf->start && buf->mtu > buf->size - (end + 1 + blocklen) - 1) {
        buf->wrap = end + 1 + blocklen;
    }

    CHUNKBUFFER2_ASSERT_BUFFER(buf)

    // update input
    _ChunkBuffer2_update_input(buf);

    // update output
    if (buf->used == 1 + blocklen) {
        _ChunkBuffer2_update_output(buf);
    }

    CHUNKBUFFER2_ASSERT_IO(buf)
}

void ChunkBuffer2_ConsumePacket (ChunkBuffer2 *buf)
{
    ASSERT(buf->output_dest)

    CHUNKBUFFER2_ASSERT_BUFFER(buf)
    CHUNKBUFFER2_ASSERT_IO(buf)

    ASSERT(1 <= buf->wrap - buf->start)
    ASSERT(1 <= buf->used)

    int blocklen = bdivide_up(buf->buffer[buf->start].len, sizeof(struct ChunkBuffer2_block));

    ASSERT(blocklen <= buf->wrap - buf->start - 1)
    ASSERT(blocklen <= buf->used - 1)

    int data_wrapped = (buf->used >= buf->wrap - buf->start);

    buf->start += 1 + blocklen;
    buf->used -= 1 + blocklen;
    if (buf->start == buf->wrap) {
        buf->start = 0;
        buf->wrap = buf->size;
    }

    CHUNKBUFFER2_ASSERT_BUFFER(buf)

    // update input
    if (data_wrapped) {
        _ChunkBuffer2_update_input(buf);
    }

    // update output
    _ChunkBuffer2_update_output(buf);

    CHUNKBUFFER2_ASSERT_IO(buf)
}

#endif