// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 */
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};
static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}
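/*
 * Reference counting as used throughout this file: the file handle itself
 * holds one reference to the client; in addition, each resource installed
 * in client->resource_idr and each scheduled iso_resource work item holds
 * one, taken with client_get() and dropped with client_put().
 */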
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};
struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};
struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};
struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};
struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[];
};
struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}
static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};
struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};
struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};
struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};
struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};
struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};
struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};
struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};
struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
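/*
 * The u64 fields of the cdev ABI carry user pointers.  In a compat syscall
 * (32-bit userland on a 64-bit kernel), compat_ptr()/ptr_to_compat() apply
 * the architecture's rules for the upper 32 bits of the pointer value.
 */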
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}
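/*
 * Illustrative use from userland (a sketch, not part of this file): udev
 * typically names these character devices /dev/fw0, /dev/fw1, ..., so a
 * client begins with something like
 *
 *	int fd = open("/dev/fw1", O_RDWR);
 *
 * and then drives the device through the ioctls dispatched below.
 */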
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}
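/*
 * Each event is queued as up to two scatter elements (event->v[0..1]) so
 * that a fixed-size event structure plus a variable-length payload can be
 * handed to dequeue_event() without first being copied into one buffer.
 */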
static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;
 out:
	kfree(event);

	return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}
static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}
static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}
static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}
void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
	struct fw_cdev_flush_iso		flush_iso;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}
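/*
 * Illustrative userland handshake (a sketch, not part of this file).
 * FW_CDEV_IOC_GET_INFO and struct fw_cdev_get_info are defined in the
 * UAPI header linux/firewire-cdev.h:
 *
 *	struct fw_cdev_get_info info = { .version = 4 };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *
 * On success, info.version holds FW_CDEV_KERNEL_VERSION, and the ABI
 * version requested by the client has been recorded in client->version.
 */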
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&client->lock, flags);

	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
				GFP_NOWAIT);
	if (ret >= 0) {
		resource->handle = ret;
		client_get(client);
		schedule_if_iso_resource(resource);
	}

	spin_unlock_irqrestore(&client->lock, flags);
	if (preload)
		idr_preload_end();

	return ret < 0 ? ret : 0;
}
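/*
 * Note the idr_preload()/GFP_NOWAIT pattern above: for blocking gfp masks,
 * the idr node is preallocated outside the spinlock so that idr_alloc()
 * under client->lock never has to sleep.
 */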
static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}
static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}
static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id	 = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}
static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);

	return 0;
}
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->remove_descriptor.handle,
				       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}
static void iso_mc_callback(struct fw_iso_context *context,
			    dma_addr_t completed, void *data)
{
	struct client *client = data;
	struct iso_interrupt_mc_event *e;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
						      completed);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt), NULL, 0);
}
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
	if (context->type == FW_ISO_CONTEXT_TRANSMIT)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
						fw_iso_mc_callback_t callback,
						void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
				    0, 0, 0, NULL, callback_data);
	if (!IS_ERR(ctx))
		ctx->callback.mc = callback;

	return ctx;
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
	struct fw_iso_context *context;
	union fw_iso_callback cb;
	int ret;

	BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE ||
		     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
					FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

	switch (a->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		if (a->speed > SCODE_3200 || a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		if (a->header_size < 4 || (a->header_size & 3) ||
		    a->channel > 63)
			return -EINVAL;

		cb.sc = iso_callback;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		cb.mc = iso_mc_callback;
		break;

	default:
		return -EINVAL;
	}

	if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
		context = fw_iso_mc_context_create(client->device->card, cb.mc,
						   client);
	else
		context = fw_iso_context_create(client->device->card, a->type,
						a->channel, a->speed,
						a->header_size, cb.sc, client);
	if (IS_ERR(context))
		return PTR_ERR(context);
	if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
		context->drop_overflow_headers = true;

	/* We only support one context at this time. */
	spin_lock_irq(&client->lock);
	if (client->iso_context != NULL) {
		spin_unlock_irq(&client->lock);
		fw_iso_context_destroy(context);

		return -EBUSY;
	}
	if (!client->buffer_is_mapped) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
					    client->device->card,
					    iso_dma_direction(context));
		if (ret < 0) {
			spin_unlock_irq(&client->lock);
			fw_iso_context_destroy(context);

			return ret;
		}
		client->buffer_is_mapped = true;
	}
	client->iso_closure = a->closure;
	client->iso_context = context;
	spin_unlock_irq(&client->lock);

	a->handle = 0;

	return 0;
}
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
	struct fw_iso_context *ctx = client->iso_context;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_set_channels(ctx, &a->channels);
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
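/*
 * Control word layout implied by the accessors above (bit 0 = LSB):
 *
 *	[15: 0]  payload_length
 *	[16]     interrupt
 *	[17]     skip
 *	[19:18]  tag
 *	[23:20]  sy
 *	[31:24]  header_length
 */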
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_queue_iso *a = &arg->queue_iso;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, transmit_header_bytes = 0;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || a->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the a->data pointer is ignored.
	 */
	payload = (unsigned long)a->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (a->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
		return -EINVAL;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

	end = (void __user *)p + a->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		switch (ctx->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
			if (u.packet.header_length & 3)
				return -EINVAL;
			transmit_header_bytes = u.packet.header_length;
			break;

		case FW_ISO_CONTEXT_RECEIVE:
			if (u.packet.header_length == 0 ||
			    u.packet.header_length % ctx->header_size != 0)
				return -EINVAL;
			break;

		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (u.packet.payload_length == 0 ||
			    u.packet.payload_length & 3)
				return -EINVAL;
			break;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[transmit_header_bytes / 4];
		if (next > end)
			return -EINVAL;
		if (copy_from_user
		    (u.packet.header, p->header, transmit_header_bytes))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}
	fw_iso_context_queue_flush(ctx);

	a->size    -= uptr_to_u64(p) - a->packets;
	a->packets  = uptr_to_u64(p);
	a->data     = client->vm_start + payload;

	return count;
}
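/*
 * On return, a->size, a->packets, and a->data have been advanced past the
 * packets that were actually queued, so a caller whose submission stopped
 * early (e.g. a full DMA program) can retry with the same, now-updated
 * argument block.
 */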
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_start_iso *a = &arg->start_iso;

	BUILD_BUG_ON(
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
	    FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
	    (a->tags == 0 || a->tags > 15 || a->sync > 15))
		return -EINVAL;

	return fw_iso_context_start(client->iso_context,
				    a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_stop_iso *a = &arg->stop_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_flush_iso *a = &arg->flush_iso;

	if (client->iso_context == NULL || a->handle != 0)
		return -EINVAL;

	return fw_iso_context_flush_completions(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
	struct fw_card *card = client->device->card;
	struct timespec64 ts = {0, 0};
	u32 cycle_time = 0;
	int ret = 0;

	local_irq_disable();

	ret = fw_card_read_cycle_time(card, &cycle_time);
	if (ret < 0)
		goto end;

	switch (a->clk_id) {
	case CLOCK_REALTIME:      ktime_get_real_ts64(&ts);	break;
	case CLOCK_MONOTONIC:     ktime_get_ts64(&ts);		break;
	case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts);	break;
	default:
		ret = -EINVAL;
	}
end:
	local_irq_enable();

	a->tv_sec      = ts.tv_sec;
	a->tv_nsec     = ts.tv_nsec;
	a->cycle_timer = cycle_time;

	return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
	struct fw_cdev_get_cycle_timer2 ct2;

	ct2.clk_id = CLOCK_REALTIME;
	ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

	a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
	a->cycle_timer = ct2.cycle_timer;

	return 0;
}
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_before64(get_jiffies_64(),
			  client->device->card->reset_jiffies + HZ)) {
		schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_remove(&client->resource_idr, r->resource.handle)) {
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->iso_resource.handle    = r->resource.handle;
	e->iso_resource.channel   = channel;
	e->iso_resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}
static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r, 0);
	spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->iso_resource.closure = request->closure;
	e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->iso_resource.closure = request->closure;
	e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r, 0);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
				       union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
					 union ioctl_arg *arg)
{
	return release_client_resource(client,
			arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
					    union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
					      union ioctl_arg *arg)
{
	return init_iso_resource(client,
			&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
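/*
 * Note that "deallocate once" goes through init_iso_resource() rather than
 * release_client_resource(): a one-shot deallocation is not tied to a
 * handle, it merely schedules a single ISO_RES_DEALLOC_ONCE pass of
 * iso_resource_work().
 */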
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
	return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
					union ioctl_arg *arg)
{
	struct fw_cdev_send_request *a = &arg->send_request;

	switch (a->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
	struct fw_cdev_send_request request;
	int dest;

	if (a->speed > client->device->card->link_speed ||
	    a->length > 1024 << a->speed)
		return -EIO;

	if (a->tag > 3 || a->channel > 63 || a->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= a->length;
	request.closure		= a->closure;
	request.data		= a->data;
	request.generation	= a->generation;

	return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	struct outbound_phy_packet_event *e =
		container_of(packet, struct outbound_phy_packet_event, p);
	struct client *e_client;

	switch (status) {
	/* expected: */
	case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	/* should never happen with PHY packets: */
	case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
	case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
	case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
	/* stale generation; cancelled; on certain controllers: no ack */
	default:		e->phy_packet.rcode = status;		break;
	}
	e->phy_packet.data[0] = packet->timestamp;

	e_client = e->client;
	queue_event(e->client, &e->event, &e->phy_packet,
		    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
	client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
	struct fw_card *card = client->device->card;
	struct outbound_phy_packet_event *e;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	client_get(client);
	e->client		= client;
	e->p.speed		= SCODE_100;
	e->p.generation		= a->generation;
	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
	e->p.header[1]		= a->data[0];
	e->p.header[2]		= a->data[1];
	e->p.header_length	= 12;
	e->p.callback		= outbound_phy_packet_callback;
	e->phy_packet.closure	= a->closure;
	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
	if (is_ping_packet(a->data))
		e->phy_packet.length = 4;

	card->driver->send_request(card, &e->p);

	return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
	struct fw_card *card = client->device->card;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	spin_lock_irq(&card->lock);

	list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
	client->phy_receiver_closure = a->closure;

	spin_unlock_irq(&card->lock);

	return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
	struct client *client;
	struct inbound_phy_packet_event *e;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
		e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
		if (e == NULL)
			break;

		e->phy_packet.closure	= client->phy_receiver_closure;
		e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
		e->phy_packet.rcode	= RCODE_COMPLETE;
		e->phy_packet.length	= 8;
		e->phy_packet.data[0]	= p->header[1];
		e->phy_packet.data[1]	= p->header[2];
		queue_event(client, &e->event,
			    &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
	[0x00] = ioctl_get_info,
	[0x01] = ioctl_send_request,
	[0x02] = ioctl_allocate,
	[0x03] = ioctl_deallocate,
	[0x04] = ioctl_send_response,
	[0x05] = ioctl_initiate_bus_reset,
	[0x06] = ioctl_add_descriptor,
	[0x07] = ioctl_remove_descriptor,
	[0x08] = ioctl_create_iso_context,
	[0x09] = ioctl_queue_iso,
	[0x0a] = ioctl_start_iso,
	[0x0b] = ioctl_stop_iso,
	[0x0c] = ioctl_get_cycle_timer,
	[0x0d] = ioctl_allocate_iso_resource,
	[0x0e] = ioctl_deallocate_iso_resource,
	[0x0f] = ioctl_allocate_iso_resource_once,
	[0x10] = ioctl_deallocate_iso_resource_once,
	[0x11] = ioctl_get_speed,
	[0x12] = ioctl_send_broadcast_request,
	[0x13] = ioctl_send_stream_packet,
	[0x14] = ioctl_get_cycle_timer2,
	[0x15] = ioctl_send_phy_packet,
	[0x16] = ioctl_receive_phy_packets,
	[0x17] = ioctl_set_iso_channels,
	[0x18] = ioctl_flush_iso,
};
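/*
 * The index into this table is _IOC_NR() of the ioctl command.
 * dispatch_ioctl() below additionally verifies that _IOC_TYPE() is '#',
 * the type letter used by the FW_CDEV_IOC_* definitions in the UAPI
 * header linux/firewire-cdev.h.
 */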
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	union ioctl_arg buffer;
	int ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -ENOTTY;

	memset(&buffer, 0, sizeof(buffer));

	if (_IOC_DIR(cmd) & _IOC_WRITE)
		if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ)
		if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
			return -EFAULT;

	return ret;
}
static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	ret = fw_iso_buffer_alloc(&client->buffer, page_count);
	if (ret < 0)
		return ret;

	spin_lock_irq(&client->lock);
	if (client->iso_context) {
		ret = fw_iso_buffer_map_dma(&client->buffer,
				client->device->card,
				iso_dma_direction(client->iso_context));
		client->buffer_is_mapped = (ret == 0);
	}
	spin_unlock_irq(&client->lock);
	if (ret < 0)
		goto fail;

	ret = vm_map_pages_zero(vma, client->buffer.pages,
				client->buffer.page_count);
	if (ret < 0)
		goto fail;

	return 0;
 fail:
	fw_iso_buffer_destroy(&client->buffer, client->device->card);
	return ret;
}
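/*
 * Illustrative userland pairing for the iso buffer (a sketch, not part of
 * this file): mmap() the payload buffer once, then reference it from
 * FW_CDEV_IOC_QUEUE_ISO packets:
 *
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is required and the size must be page-aligned, as enforced
 * above.
 */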
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;

	return resource->release == release_transaction;
}
static int has_outbound_transactions(struct client *client)
{
	int ret;

	spin_lock_irq(&client->lock);
	ret = idr_for_each(&client->resource_idr,
			   is_outbound_transaction_resource, NULL);
	spin_unlock_irq(&client->lock);

	return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *resource = p;
	struct client *client = data;

	resource->release(client, resource);
	client_put(client);

	return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *event, *next_event;

	spin_lock_irq(&client->device->card->lock);
	list_del(&client->phy_receiver_link);
	spin_unlock_irq(&client->device->card->lock);

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(event, next_event, &client->event_list, link)
		kfree(event);

	client_put(client);

	return 0;
}
static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= EPOLLHUP | EPOLLERR;
	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.mmap		= fw_device_op_mmap,
	.release	= fw_device_op_release,
	.poll		= fw_device_op_poll,
	.compat_ioctl	= compat_ptr_ioctl,
};