GNU Linux-libre 5.19-rc6-gnu
drivers/hv/ring_buffer.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright (c) 2009, Microsoft Corporation.
5  *
6  * Authors:
7  *   Haiyang Zhang <haiyangz@microsoft.com>
8  *   Hank Janssen  <hjanssen@microsoft.com>
9  *   K. Y. Srinivasan <kys@microsoft.com>
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/kernel.h>
14 #include <linux/mm.h>
15 #include <linux/hyperv.h>
16 #include <linux/uio.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/prefetch.h>
20 #include <linux/io.h>
21 #include <asm/mshyperv.h>
22
23 #include "hyperv_vmbus.h"
24
25 #define VMBUS_PKT_TRAILER       8
26
27 /*
28  * When we write to the ring buffer, check if the host needs to
29  * be signaled. Here are the details of this protocol:
30  *
31  *      1. The host guarantees that while it is draining the
32  *         ring buffer, it will set the interrupt_mask to
33  *         indicate it does not need to be interrupted when
34  *         new data is placed.
35  *
36  *      2. The host guarantees that it will completely drain
37  *         the ring buffer before exiting the read loop. Further,
38  *         once the ring buffer is empty, it will clear the
39  *         interrupt_mask and re-check to see if new data has
40  *         arrived.
41  *
42  * KYS: Oct. 30, 2016:
43  * It looks like Windows hosts have logic to deal with DOS attacks that
44  * can be triggered if they receive interrupts when they are not expecting
45  * them. The host expects interrupts only when the ring
46  * transitions from empty to non-empty (or full to non-full on the guest-
47  * to-host ring).
48  * So, base the signaling decision solely on the ring state until the
49  * host logic is fixed.
50  */
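/*
 * Illustrative walk-through of the rule above (not part of the driver;
 * all values are assumed): suppose read_index == write_index == 512, so
 * the guest->host ring is empty.  A write that starts at offset 512
 * leaves old_write == read_index (512), i.e. the ring just went from
 * empty to non-empty, so the host is signaled (unless interrupt_mask is
 * set).  A second write queued before the host drains starts at, say,
 * old_write == 648 != read_index, so no further signal is sent.
 */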
51
52 static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
53 {
54         struct hv_ring_buffer_info *rbi = &channel->outbound;
55
56         virt_mb();
57         if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
58                 return;
59
60         /* check interrupt_mask before read_index */
61         virt_rmb();
62         /*
63          * This is the only case we need to signal when the
64          * ring transitions from being empty to non-empty.
65          */
66         if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
67                 ++channel->intr_out_empty;
68                 vmbus_setevent(channel);
69         }
70 }
71
72 /* Get the next write location for the specified ring buffer. */
73 static inline u32
74 hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
75 {
76         u32 next = ring_info->ring_buffer->write_index;
77
78         return next;
79 }
80
81 /* Set the next write location for the specified ring buffer. */
82 static inline void
83 hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
84                      u32 next_write_location)
85 {
86         ring_info->ring_buffer->write_index = next_write_location;
87 }
88
89 /* Get the size of the ring buffer. */
90 static inline u32
91 hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
92 {
93         return ring_info->ring_datasize;
94 }
95
96 /* Get the write index of the specified ring buffer, as the upper 32 bits of a u64. */
97 static inline u64
98 hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
99 {
100         return (u64)ring_info->ring_buffer->write_index << 32;
101 }
102
103 /*
104  * Helper routine to copy from source to ring buffer.
105  * Assume there is enough room. Handles wrap-around in the destination only.
106  */
107 static u32 hv_copyto_ringbuffer(
108         struct hv_ring_buffer_info      *ring_info,
109         u32                             start_write_offset,
110         const void                      *src,
111         u32                             srclen)
112 {
113         void *ring_buffer = hv_get_ring_buffer(ring_info);
114         u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
115
116         memcpy(ring_buffer + start_write_offset, src, srclen);
117
118         start_write_offset += srclen;
119         if (start_write_offset >= ring_buffer_size)
120                 start_write_offset -= ring_buffer_size;
121
122         return start_write_offset;
123 }
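/*
 * Illustrative example for hv_copyto_ringbuffer() (values assumed, not
 * taken from the driver): with ring_buffer_size = 12288, copying
 * srclen = 400 bytes at start_write_offset = 12100 runs past the end of
 * the data area.  The single memcpy() still works because the data
 * pages are mapped twice (see hv_ringbuffer_init()); only the returned
 * offset is wrapped: 12100 + 400 - 12288 = 212.
 */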
124
125 /*
126  *
127  * hv_get_ringbuffer_availbytes()
128  *
129  * Get the number of bytes available to read and to write
130  * for the specified ring buffer.
131  */
132 static void
133 hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
134                              u32 *read, u32 *write)
135 {
136         u32 read_loc, write_loc, dsize;
137
138         /* Capture the read/write indices before they change */
139         read_loc = READ_ONCE(rbi->ring_buffer->read_index);
140         write_loc = READ_ONCE(rbi->ring_buffer->write_index);
141         dsize = rbi->ring_datasize;
142
143         *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
144                 read_loc - write_loc;
145         *read = dsize - *write;
146 }
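/*
 * Worked example for hv_get_ringbuffer_availbytes() (assumed values):
 * with dsize = 12288, read_loc = 1000 and write_loc = 3000, the bytes
 * available to write are 12288 - (3000 - 1000) = 10288 and the bytes
 * available to read are 12288 - 10288 = 2000.  With read_loc = 3000 and
 * write_loc = 1000 (the writer has wrapped), *write = 3000 - 1000 = 2000
 * and *read = 10288.
 */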
147
148 /* Get various debug metrics for the specified ring buffer. */
149 int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
150                                 struct hv_ring_buffer_debug_info *debug_info)
151 {
152         u32 bytes_avail_towrite;
153         u32 bytes_avail_toread;
154
155         mutex_lock(&ring_info->ring_buffer_mutex);
156
157         if (!ring_info->ring_buffer) {
158                 mutex_unlock(&ring_info->ring_buffer_mutex);
159                 return -EINVAL;
160         }
161
162         hv_get_ringbuffer_availbytes(ring_info,
163                                      &bytes_avail_toread,
164                                      &bytes_avail_towrite);
165         debug_info->bytes_avail_toread = bytes_avail_toread;
166         debug_info->bytes_avail_towrite = bytes_avail_towrite;
167         debug_info->current_read_index = ring_info->ring_buffer->read_index;
168         debug_info->current_write_index = ring_info->ring_buffer->write_index;
169         debug_info->current_interrupt_mask
170                 = ring_info->ring_buffer->interrupt_mask;
171         mutex_unlock(&ring_info->ring_buffer_mutex);
172
173         return 0;
174 }
175 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
176
177 /* Initialize a channel's ring buffer info mutex locks */
178 void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
179 {
180         mutex_init(&channel->inbound.ring_buffer_mutex);
181         mutex_init(&channel->outbound.ring_buffer_mutex);
182 }
183
184 /* Initialize the ring buffer. */
185 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
186                        struct page *pages, u32 page_cnt, u32 max_pkt_size)
187 {
188         struct page **pages_wraparound;
189         unsigned long *pfns_wraparound;
190         u64 pfn;
191         int i;
192
193         BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
194
195         /*
196          * First page holds struct hv_ring_buffer, do wraparound mapping for
197          * the rest.
198          */
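        /*
         * Illustrative layout, with page_cnt = 4 assumed purely for the
         * example: the wraparound array has 2 * 4 - 1 = 7 entries,
         * [header, data1, data2, data3, data1, data2, data3].  Mapping
         * the data pages twice, back to back, lets a packet that crosses
         * the end of the data area be copied with a single memcpy()
         * (see hv_copyto_ringbuffer()).  The SNP isolation path below
         * builds the same layout from PFNs offset by the shared GPA
         * boundary.
         */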
199         if (hv_isolation_type_snp()) {
200                 pfn = page_to_pfn(pages) +
201                         PFN_DOWN(ms_hyperv.shared_gpa_boundary);
202
203                 pfns_wraparound = kcalloc(page_cnt * 2 - 1,
204                         sizeof(unsigned long), GFP_KERNEL);
205                 if (!pfns_wraparound)
206                         return -ENOMEM;
207
208                 pfns_wraparound[0] = pfn;
209                 for (i = 0; i < 2 * (page_cnt - 1); i++)
210                         pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
211
212                 ring_info->ring_buffer = (struct hv_ring_buffer *)
213                         vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
214                                  PAGE_KERNEL);
215                 kfree(pfns_wraparound);
216
217                 if (!ring_info->ring_buffer)
218                         return -ENOMEM;
219
220                 /* Zero ring buffer after setting memory host visibility. */
221                 memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
222         } else {
223                 pages_wraparound = kcalloc(page_cnt * 2 - 1,
224                                            sizeof(struct page *),
225                                            GFP_KERNEL);
226                 if (!pages_wraparound)
227                         return -ENOMEM;
228
229                 pages_wraparound[0] = pages;
230                 for (i = 0; i < 2 * (page_cnt - 1); i++)
231                         pages_wraparound[i + 1] =
232                                 &pages[i % (page_cnt - 1) + 1];
233
234                 ring_info->ring_buffer = (struct hv_ring_buffer *)
235                         vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
236                                 PAGE_KERNEL);
237
238                 kfree(pages_wraparound);
239                 if (!ring_info->ring_buffer)
240                         return -ENOMEM;
241         }
242
243
244         ring_info->ring_buffer->read_index =
245                 ring_info->ring_buffer->write_index = 0;
246
247         /* Set the feature bit for enabling flow control. */
248         ring_info->ring_buffer->feature_bits.value = 1;
249
250         ring_info->ring_size = page_cnt << PAGE_SHIFT;
251         ring_info->ring_size_div10_reciprocal =
252                 reciprocal_value(ring_info->ring_size / 10);
253         ring_info->ring_datasize = ring_info->ring_size -
254                 sizeof(struct hv_ring_buffer);
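        /*
         * Size arithmetic, illustrated with assumed values: page_cnt = 4
         * and PAGE_SIZE = 4096 give ring_size = 16384 and ring_datasize =
         * 16384 - 4096 = 12288, since the struct hv_ring_buffer header
         * occupies exactly one page (see the BUILD_BUG_ON above).
         */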
255         ring_info->priv_read_index = 0;
256
257         /* Initialize buffer that holds copies of incoming packets */
258         if (max_pkt_size) {
259                 ring_info->pkt_buffer = kzalloc(max_pkt_size, GFP_KERNEL);
260                 if (!ring_info->pkt_buffer)
261                         return -ENOMEM;
262                 ring_info->pkt_buffer_size = max_pkt_size;
263         }
264
265         spin_lock_init(&ring_info->ring_lock);
266
267         return 0;
268 }
269
270 /* Cleanup the ring buffer. */
271 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
272 {
273         mutex_lock(&ring_info->ring_buffer_mutex);
274         vunmap(ring_info->ring_buffer);
275         ring_info->ring_buffer = NULL;
276         mutex_unlock(&ring_info->ring_buffer_mutex);
277
278         kfree(ring_info->pkt_buffer);
279         ring_info->pkt_buffer = NULL;
280         ring_info->pkt_buffer_size = 0;
281 }
282
283 /* Write to the ring buffer. */
284 int hv_ringbuffer_write(struct vmbus_channel *channel,
285                         const struct kvec *kv_list, u32 kv_count,
286                         u64 requestid, u64 *trans_id)
287 {
288         int i;
289         u32 bytes_avail_towrite;
290         u32 totalbytes_towrite = sizeof(u64);
291         u32 next_write_location;
292         u32 old_write;
293         u64 prev_indices;
294         unsigned long flags;
295         struct hv_ring_buffer_info *outring_info = &channel->outbound;
296         struct vmpacket_descriptor *desc = kv_list[0].iov_base;
297         u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR;
298
299         if (channel->rescind)
300                 return -ENODEV;
301
302         for (i = 0; i < kv_count; i++)
303                 totalbytes_towrite += kv_list[i].iov_len;
304
305         spin_lock_irqsave(&outring_info->ring_lock, flags);
306
307         bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
308
309         /*
310          * If there is only exactly enough room for the packet, report the
311          * ring as full.  Otherwise the write would make read index ==
312          * write index, and the ring buffer would wrongly appear empty.
313          */
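        /*
         * Example with assumed numbers: if totalbytes_towrite = 200
         * (192 bytes of kvec data plus the 8-byte index trailer) and
         * bytes_avail_towrite = 200, the write is refused with -EAGAIN;
         * allowing it would advance write_index onto read_index and the
         * ring would then look empty.  With 201 or more bytes free, the
         * write proceeds.
         */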
314         if (bytes_avail_towrite <= totalbytes_towrite) {
315                 ++channel->out_full_total;
316
317                 if (!channel->out_full_flag) {
318                         ++channel->out_full_first;
319                         channel->out_full_flag = true;
320                 }
321
322                 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
323                 return -EAGAIN;
324         }
325
326         channel->out_full_flag = false;
327
328         /* Write to the ring buffer */
329         next_write_location = hv_get_next_write_location(outring_info);
330
331         old_write = next_write_location;
332
333         for (i = 0; i < kv_count; i++) {
334                 next_write_location = hv_copyto_ringbuffer(outring_info,
335                                                      next_write_location,
336                                                      kv_list[i].iov_base,
337                                                      kv_list[i].iov_len);
338         }
339
340         /*
341          * Allocate the request ID after the data has been copied into the
342          * ring buffer.  Once this request ID is allocated, the completion
343          * path could find the data and free it.
344          */
345
346         if (desc->flags == VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED) {
347                 if (channel->next_request_id_callback != NULL) {
348                         rqst_id = channel->next_request_id_callback(channel, requestid);
349                         if (rqst_id == VMBUS_RQST_ERROR) {
350                                 spin_unlock_irqrestore(&outring_info->ring_lock, flags);
351                                 return -EAGAIN;
352                         }
353                 }
354         }
355         desc = hv_get_ring_buffer(outring_info) + old_write;
356         __trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id;
357         /*
358          * Ensure the compiler doesn't generate code that reads the value of
359          * the transaction ID from the ring buffer, which is shared with the
360          * Hyper-V host and subject to being changed at any time.
361          */
362         WRITE_ONCE(desc->trans_id, __trans_id);
363         if (trans_id)
364                 *trans_id = __trans_id;
365
366         /* Set previous packet start */
367         prev_indices = hv_get_ring_bufferindices(outring_info);
368
369         next_write_location = hv_copyto_ringbuffer(outring_info,
370                                              next_write_location,
371                                              &prev_indices,
372                                              sizeof(u64));
373
374         /* Issue a full memory barrier before updating the write index */
375         virt_mb();
376
377         /* Now, update the write location */
378         hv_set_next_write_location(outring_info, next_write_location);
379
380
381         spin_unlock_irqrestore(&outring_info->ring_lock, flags);
382
383         hv_signal_on_write(old_write, channel);
384
385         if (channel->rescind) {
386                 if (rqst_id != VMBUS_NO_RQSTOR) {
387                         /* Reclaim request ID to avoid leak of IDs */
388                         if (channel->request_addr_callback != NULL)
389                                 channel->request_addr_callback(channel, rqst_id);
390                 }
391                 return -ENODEV;
392         }
393
394         return 0;
395 }
396
397 int hv_ringbuffer_read(struct vmbus_channel *channel,
398                        void *buffer, u32 buflen, u32 *buffer_actual_len,
399                        u64 *requestid, bool raw)
400 {
401         struct vmpacket_descriptor *desc;
402         u32 packetlen, offset;
403
404         if (unlikely(buflen == 0))
405                 return -EINVAL;
406
407         *buffer_actual_len = 0;
408         *requestid = 0;
409
410         /* Make sure there is something to read */
411         desc = hv_pkt_iter_first(channel);
412         if (desc == NULL) {
413                 /*
414                  * No error is returned even when there is no header; drivers
415                  * are expected to check buffer_actual_len.
416                  */
417                 return 0;
418         }
419
420         offset = raw ? 0 : (desc->offset8 << 3);
421         packetlen = (desc->len8 << 3) - offset;
422         *buffer_actual_len = packetlen;
423         *requestid = desc->trans_id;
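        /*
         * Example with assumed descriptor values: offset8 = 2 and
         * len8 = 66 describe a 16-byte header and a 528-byte packet.
         * With raw == false, packetlen = 528 - 16 = 512 payload bytes
         * are copied out; with raw == true the full 528 bytes, including
         * the descriptor, are returned to the caller.
         */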
424
425         if (unlikely(packetlen > buflen))
426                 return -ENOBUFS;
427
428         /* since ring is double mapped, only one copy is necessary */
429         memcpy(buffer, (const char *)desc + offset, packetlen);
430
431         /* Advance ring index to next packet descriptor */
432         __hv_pkt_iter_next(channel, desc);
433
434         /* Notify host of update */
435         hv_pkt_iter_close(channel);
436
437         return 0;
438 }
439
440 /*
441  * Determine number of bytes available in ring buffer after
442  * the current iterator (priv_read_index) location.
443  *
444  * This is similar to hv_get_bytes_to_read but with private
445  * read index instead.
446  */
447 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
448 {
449         u32 priv_read_loc = rbi->priv_read_index;
450         u32 write_loc;
451
452         /*
453          * The Hyper-V host writes the packet data, then uses
454          * store_release() to update the write_index.  Use load_acquire()
455          * here to prevent loads of the packet data from being re-ordered
456          * before the read of the write_index and potentially getting
457          * stale data.
458          */
459         write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
460
461         if (write_loc >= priv_read_loc)
462                 return write_loc - priv_read_loc;
463         else
464                 return (rbi->ring_datasize - priv_read_loc) + write_loc;
465 }
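/*
 * Worked example (assumed values): with ring_datasize = 12288,
 * priv_read_index = 12000 and write_index = 100, the iterator still has
 * (12288 - 12000) + 100 = 388 bytes available; with write_index = 12100
 * it has 100 bytes.
 */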
466
467 /*
468  * Get first vmbus packet from ring buffer after read_index
469  *
470  * If ring buffer is empty, returns NULL and no other action needed.
471  */
472 struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
473 {
474         struct hv_ring_buffer_info *rbi = &channel->inbound;
475         struct vmpacket_descriptor *desc, *desc_copy;
476         u32 bytes_avail, pkt_len, pkt_offset;
477
478         hv_debug_delay_test(channel, MESSAGE_DELAY);
479
480         bytes_avail = hv_pkt_iter_avail(rbi);
481         if (bytes_avail < sizeof(struct vmpacket_descriptor))
482                 return NULL;
483         bytes_avail = min(rbi->pkt_buffer_size, bytes_avail);
484
485         desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
486
487         /*
488          * Ensure the compiler does not use references to incoming Hyper-V values (which
489          * could change at any moment) when reading local variables later in the code
490          */
491         pkt_len = READ_ONCE(desc->len8) << 3;
492         pkt_offset = READ_ONCE(desc->offset8) << 3;
493
494         /*
495          * If pkt_len is invalid, set it to the smaller of hv_pkt_iter_avail() and
496          * rbi->pkt_buffer_size
497          */
498         if (pkt_len < sizeof(struct vmpacket_descriptor) || pkt_len > bytes_avail)
499                 pkt_len = bytes_avail;
500
501         /*
502          * If pkt_offset is invalid, arbitrarily set it to
503          * the size of vmpacket_descriptor
504          */
505         if (pkt_offset < sizeof(struct vmpacket_descriptor) || pkt_offset > pkt_len)
506                 pkt_offset = sizeof(struct vmpacket_descriptor);
507
508         /* Copy the Hyper-V packet out of the ring buffer */
509         desc_copy = (struct vmpacket_descriptor *)rbi->pkt_buffer;
510         memcpy(desc_copy, desc, pkt_len);
511
512         /*
513          * Hyper-V could still change len8 and offset8 after the earlier read.
514          * Ensure that desc_copy has legal values for len8 and offset8 that
515          * are consistent with the copy we just made
516          */
517         desc_copy->len8 = pkt_len >> 3;
518         desc_copy->offset8 = pkt_offset >> 3;
519
520         return desc_copy;
521 }
522 EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
523
524 /*
525  * Get next vmbus packet from ring buffer.
526  *
527  * Advances the current location (priv_read_index) and checks for more
528  * data. Returns NULL when no more packets are available.
529  */
530 struct vmpacket_descriptor *
531 __hv_pkt_iter_next(struct vmbus_channel *channel,
532                    const struct vmpacket_descriptor *desc)
533 {
534         struct hv_ring_buffer_info *rbi = &channel->inbound;
535         u32 packetlen = desc->len8 << 3;
536         u32 dsize = rbi->ring_datasize;
537
538         hv_debug_delay_test(channel, MESSAGE_DELAY);
539         /* bump offset to next potential packet */
540         rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
541         if (rbi->priv_read_index >= dsize)
542                 rbi->priv_read_index -= dsize;
543
544         /* more data? */
545         return hv_pkt_iter_first(channel);
546 }
547 EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
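/*
 * Example of the advance above (assumed values): with dsize = 12288,
 * priv_read_index = 12200 and desc->len8 = 16 (a 128-byte packet), the
 * new index is 12200 + 128 + 8 (VMBUS_PKT_TRAILER) = 12336, which wraps
 * to 12336 - 12288 = 48.
 */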
548
549 /* How many bytes were read in this iterator cycle */
550 static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
551                                         u32 start_read_index)
552 {
553         if (rbi->priv_read_index >= start_read_index)
554                 return rbi->priv_read_index - start_read_index;
555         else
556                 return rbi->ring_datasize - start_read_index +
557                         rbi->priv_read_index;
558 }
559
560 /*
561  * Update host ring buffer after iterating over packets. If the host has
562  * stopped queuing new entries because it found the ring buffer full, and
563  * sufficient space is being freed up, signal the host. But be careful to
564  * only signal the host when necessary, both for performance reasons and
565  * because Hyper-V protects itself by throttling guests that signal
566  * inappropriately.
567  *
568  * Determining when to signal is tricky. There are three key data inputs
569  * that must be handled in this order to avoid race conditions:
570  *
571  * 1. Update the read_index
572  * 2. Read the pending_send_sz
573  * 3. Read the current write_index
574  *
575  * The interrupt_mask is not used to determine when to signal. The
576  * interrupt_mask is used only on the guest->host ring buffer when
577  * sending requests to the host. The host does not use it on the host->
578  * guest ring buffer to indicate whether it should be signaled.
579  */
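/*
 * Decision walk-through with assumed numbers: suppose the host set
 * pending_send_sz = 2048.  With curr_write_sz = 2560 and bytes_read =
 * 1024, the free space before this iteration was 2560 - 1024 = 1536,
 * at or below 2048, so the host was blocked; it is now 2560, above
 * 2048, so the guest signals.  With curr_write_sz = 4096 the prior free
 * space would already have been 3072, so the host was never blocked and
 * no signal is sent; and with curr_write_sz still at or below 2048
 * there is not yet enough room, so again no signal.
 */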
580 void hv_pkt_iter_close(struct vmbus_channel *channel)
581 {
582         struct hv_ring_buffer_info *rbi = &channel->inbound;
583         u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
584
585         /*
586          * Make sure all reads are done before we update the read index since
587          * the writer may start writing to the read area once the read index
588          * is updated.
589          */
590         virt_rmb();
591         start_read_index = rbi->ring_buffer->read_index;
592         rbi->ring_buffer->read_index = rbi->priv_read_index;
593
594         /*
595          * Older versions of Hyper-V (before WS2012 and Win8) do not
596          * implement pending_send_sz and simply poll when the host->guest
597          * ring buffer is full.  No signaling is needed or expected.
598          */
599         if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
600                 return;
601
602         /*
603          * Issue a full memory barrier before making the signaling decision.
604          * If reading pending_send_sz were to be reordered and happen
605          * before we commit the new read_index, a race could occur.  If the
606          * host were to set the pending_send_sz after we have sampled
607          * pending_send_sz, and the ring buffer blocks before we commit the
608          * read index, we could miss sending the interrupt. Issue a full
609          * memory barrier to address this.
610          */
611         virt_mb();
612
613         /*
614          * If the pending_send_sz is zero, then the ring buffer is not
615          * blocked and there is no need to signal.  This is by far the
616          * most common case, so exit quickly for best performance.
617          */
618         pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
619         if (!pending_sz)
620                 return;
621
622         /*
623          * Ensure the read of write_index in hv_get_bytes_to_write()
624          * happens after the read of pending_send_sz.
625          */
626         virt_rmb();
627         curr_write_sz = hv_get_bytes_to_write(rbi);
628         bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
629
630         /*
631          * We want to signal the host only if we're transitioning
632          * from a "not enough free space" state to an "enough free
633          * space" state.  For example, it's possible that this function
634          * could run and free up enough space to signal the host, and then
635          * run again and free up additional space before the host has a
636          * chance to clear the pending_send_sz.  The 2nd invocation would
637          * be a null transition from "enough free space" to "enough free
638          * space", which doesn't warrant a signal.
639          *
640          * Exactly filling the ring buffer is treated as "not enough
641          * space". The ring buffer always must have at least one byte
642          * empty so the empty and full conditions are distinguishable.
643          * hv_get_bytes_to_write() doesn't fully tell the truth in
644          * this regard.
645          *
646          * So first check if we were in the "enough free space" state
647          * before we began the iteration. If so, the host was not
648          * blocked, and there's no need to signal.
649          */
650         if (curr_write_sz - bytes_read > pending_sz)
651                 return;
652
653         /*
654          * Similarly, if the new state is "not enough space", then
655          * there's no need to signal.
656          */
657         if (curr_write_sz <= pending_sz)
658                 return;
659
660         ++channel->intr_in_full;
661         vmbus_setevent(channel);
662 }
663 EXPORT_SYMBOL_GPL(hv_pkt_iter_close);