/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include "cec-priv.h"
35 static inline struct cec_devnode *cec_devnode_data(struct file *filp)
37 struct cec_fh *fh = filp->private_data;
39 return &fh->adap->devnode;
42 /* CEC file operations */
44 static unsigned int cec_poll(struct file *filp,
45 struct poll_table_struct *poll)
47 struct cec_devnode *devnode = cec_devnode_data(filp);
48 struct cec_fh *fh = filp->private_data;
49 struct cec_adapter *adap = fh->adap;
52 if (!devnode->registered)
53 return POLLERR | POLLHUP;
54 mutex_lock(&adap->lock);
55 if (adap->is_configured &&
56 adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
57 res |= POLLOUT | POLLWRNORM;
59 res |= POLLIN | POLLRDNORM;
60 if (fh->pending_events)
62 poll_wait(filp, &fh->wait, poll);
63 mutex_unlock(&adap->lock);
67 static bool cec_is_busy(const struct cec_adapter *adap,
68 const struct cec_fh *fh)
70 bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
71 bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
74 * Exclusive initiators and followers can always access the CEC adapter
76 if (valid_initiator || valid_follower)
79 * All others can only access the CEC adapter if there is no
80 * exclusive initiator and they are in INITIATOR mode.
82 return adap->cec_initiator ||
83 fh->mode_initiator == CEC_MODE_NO_INITIATOR;
86 static long cec_adap_g_caps(struct cec_adapter *adap,
87 struct cec_caps __user *parg)
89 struct cec_caps caps = {};
91 strlcpy(caps.driver, adap->devnode.parent->driver->name,
93 strlcpy(caps.name, adap->name, sizeof(caps.name));
94 caps.available_log_addrs = adap->available_log_addrs;
95 caps.capabilities = adap->capabilities;
96 caps.version = LINUX_VERSION_CODE;
97 if (copy_to_user(parg, &caps, sizeof(caps)))
102 static long cec_adap_g_phys_addr(struct cec_adapter *adap,
107 mutex_lock(&adap->lock);
108 phys_addr = adap->phys_addr;
109 mutex_unlock(&adap->lock);
110 if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
115 static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
116 bool block, __u16 __user *parg)
121 if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
123 if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
126 err = cec_phys_addr_validate(phys_addr, NULL, NULL);
129 mutex_lock(&adap->lock);
130 if (cec_is_busy(adap, fh))
133 __cec_s_phys_addr(adap, phys_addr, block);
134 mutex_unlock(&adap->lock);
138 static long cec_adap_g_log_addrs(struct cec_adapter *adap,
139 struct cec_log_addrs __user *parg)
141 struct cec_log_addrs log_addrs;
143 mutex_lock(&adap->lock);
145 * We use memcpy here instead of assignment since there is a
146 * hole at the end of struct cec_log_addrs that an assignment
147 * might ignore. So when we do copy_to_user() we could leak
148 * one byte of memory.
150 memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
151 if (!adap->is_configured)
152 memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
153 sizeof(log_addrs.log_addr));
154 mutex_unlock(&adap->lock);
156 if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
161 static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
162 bool block, struct cec_log_addrs __user *parg)
164 struct cec_log_addrs log_addrs;
167 if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
169 if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
171 log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
172 mutex_lock(&adap->lock);
173 if (!adap->is_configuring &&
174 (!log_addrs.num_log_addrs || !adap->is_configured) &&
175 !cec_is_busy(adap, fh)) {
176 err = __cec_s_log_addrs(adap, &log_addrs, block);
178 log_addrs = adap->log_addrs;
180 mutex_unlock(&adap->lock);
183 if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
188 static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
189 bool block, struct cec_msg __user *parg)
191 struct cec_msg msg = {};
194 if (!(adap->capabilities & CEC_CAP_TRANSMIT))
196 if (copy_from_user(&msg, parg, sizeof(msg)))
198 mutex_lock(&adap->lock);
199 if (!adap->is_configured)
201 else if (cec_is_busy(adap, fh))
204 err = cec_transmit_msg_fh(adap, &msg, fh, block);
205 mutex_unlock(&adap->lock);
208 if (copy_to_user(parg, &msg, sizeof(msg)))
213 /* Called by CEC_RECEIVE: wait for a message to arrive */
214 static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
216 u32 timeout = msg->timeout;
220 mutex_lock(&fh->lock);
221 /* Are there received messages queued up? */
222 if (fh->queued_msgs) {
223 /* Yes, return the first one */
224 struct cec_msg_entry *entry =
225 list_first_entry(&fh->msgs,
226 struct cec_msg_entry, list);
228 list_del(&entry->list);
232 mutex_unlock(&fh->lock);
233 /* restore original timeout value */
234 msg->timeout = timeout;
238 /* No, return EAGAIN in non-blocking mode or wait */
239 mutex_unlock(&fh->lock);
241 /* Return when in non-blocking mode */
246 /* The user specified a timeout */
247 res = wait_event_interruptible_timeout(fh->wait,
249 msecs_to_jiffies(msg->timeout));
255 /* Wait indefinitely */
256 res = wait_event_interruptible(fh->wait,
259 /* Exit on error, otherwise loop to get the new message */
264 static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
265 bool block, struct cec_msg __user *parg)
267 struct cec_msg msg = {};
270 if (copy_from_user(&msg, parg, sizeof(msg)))
272 mutex_lock(&adap->lock);
273 if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
275 mutex_unlock(&adap->lock);
279 err = cec_receive_msg(fh, &msg, block);
282 if (copy_to_user(parg, &msg, sizeof(msg)))
287 static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
288 bool block, struct cec_event __user *parg)
290 struct cec_event *ev = NULL;
295 mutex_lock(&fh->lock);
296 while (!fh->pending_events && block) {
297 mutex_unlock(&fh->lock);
298 err = wait_event_interruptible(fh->wait, fh->pending_events);
301 mutex_lock(&fh->lock);
304 /* Find the oldest event */
305 for (i = 0; i < CEC_NUM_EVENTS; i++) {
306 if (fh->pending_events & (1 << (i + 1)) &&
307 fh->events[i].ts <= ts) {
317 if (copy_to_user(parg, ev, sizeof(*ev))) {
322 fh->pending_events &= ~(1 << ev->event);
325 mutex_unlock(&fh->lock);
329 static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
332 u32 mode = fh->mode_initiator | fh->mode_follower;
334 if (copy_to_user(parg, &mode, sizeof(mode)))
339 static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
347 if (copy_from_user(&mode, parg, sizeof(mode)))
349 if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
352 mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
353 mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
355 if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
356 mode_follower > CEC_MODE_MONITOR_ALL)
359 if (mode_follower == CEC_MODE_MONITOR_ALL &&
360 !(adap->capabilities & CEC_CAP_MONITOR_ALL))
363 /* Follower modes should always be able to send CEC messages */
364 if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
365 !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
366 mode_follower >= CEC_MODE_FOLLOWER &&
367 mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
370 /* Monitor modes require CEC_MODE_NO_INITIATOR */
371 if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
374 /* Monitor modes require CAP_NET_ADMIN */
375 if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
378 mutex_lock(&adap->lock);
380 * You can't become exclusive follower if someone else already
383 if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
384 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
385 adap->cec_follower && adap->cec_follower != fh)
388 * You can't become exclusive initiator if someone else already
391 if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
392 adap->cec_initiator && adap->cec_initiator != fh)
396 bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
397 bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
399 if (old_mon_all != new_mon_all) {
401 err = cec_monitor_all_cnt_inc(adap);
403 cec_monitor_all_cnt_dec(adap);
408 mutex_unlock(&adap->lock);
412 if (fh->mode_follower == CEC_MODE_FOLLOWER)
413 adap->follower_cnt--;
414 if (mode_follower == CEC_MODE_FOLLOWER)
415 adap->follower_cnt++;
416 if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
417 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
419 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
420 adap->cec_follower = fh;
421 } else if (adap->cec_follower == fh) {
422 adap->passthrough = false;
423 adap->cec_follower = NULL;
425 if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
426 adap->cec_initiator = fh;
427 else if (adap->cec_initiator == fh)
428 adap->cec_initiator = NULL;
429 fh->mode_initiator = mode_initiator;
430 fh->mode_follower = mode_follower;
431 mutex_unlock(&adap->lock);
435 static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
437 struct cec_devnode *devnode = cec_devnode_data(filp);
438 struct cec_fh *fh = filp->private_data;
439 struct cec_adapter *adap = fh->adap;
440 bool block = !(filp->f_flags & O_NONBLOCK);
441 void __user *parg = (void __user *)arg;
443 if (!devnode->registered)
447 case CEC_ADAP_G_CAPS:
448 return cec_adap_g_caps(adap, parg);
450 case CEC_ADAP_G_PHYS_ADDR:
451 return cec_adap_g_phys_addr(adap, parg);
453 case CEC_ADAP_S_PHYS_ADDR:
454 return cec_adap_s_phys_addr(adap, fh, block, parg);
456 case CEC_ADAP_G_LOG_ADDRS:
457 return cec_adap_g_log_addrs(adap, parg);
459 case CEC_ADAP_S_LOG_ADDRS:
460 return cec_adap_s_log_addrs(adap, fh, block, parg);
463 return cec_transmit(adap, fh, block, parg);
466 return cec_receive(adap, fh, block, parg);
469 return cec_dqevent(adap, fh, block, parg);
472 return cec_g_mode(adap, fh, parg);
475 return cec_s_mode(adap, fh, parg);
482 static int cec_open(struct inode *inode, struct file *filp)
484 struct cec_devnode *devnode =
485 container_of(inode->i_cdev, struct cec_devnode, cdev);
486 struct cec_adapter *adap = to_cec_adapter(devnode);
487 struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
489 * Initial events that are automatically sent when the cec device is
492 struct cec_event ev_state = {
493 .event = CEC_EVENT_STATE_CHANGE,
494 .flags = CEC_EVENT_FL_INITIAL_STATE,
501 INIT_LIST_HEAD(&fh->msgs);
502 INIT_LIST_HEAD(&fh->xfer_list);
503 mutex_init(&fh->lock);
504 init_waitqueue_head(&fh->wait);
506 fh->mode_initiator = CEC_MODE_INITIATOR;
509 err = cec_get_device(devnode);
515 filp->private_data = fh;
517 mutex_lock(&devnode->lock);
518 /* Queue up initial state events */
519 ev_state.state_change.phys_addr = adap->phys_addr;
520 ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
521 cec_queue_event_fh(fh, &ev_state, 0);
523 list_add(&fh->list, &devnode->fhs);
524 mutex_unlock(&devnode->lock);
529 /* Override for the release function */
530 static int cec_release(struct inode *inode, struct file *filp)
532 struct cec_devnode *devnode = cec_devnode_data(filp);
533 struct cec_adapter *adap = to_cec_adapter(devnode);
534 struct cec_fh *fh = filp->private_data;
536 mutex_lock(&adap->lock);
537 if (adap->cec_initiator == fh)
538 adap->cec_initiator = NULL;
539 if (adap->cec_follower == fh) {
540 adap->cec_follower = NULL;
541 adap->passthrough = false;
543 if (fh->mode_follower == CEC_MODE_FOLLOWER)
544 adap->follower_cnt--;
545 if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
546 cec_monitor_all_cnt_dec(adap);
547 mutex_unlock(&adap->lock);
549 mutex_lock(&devnode->lock);
551 mutex_unlock(&devnode->lock);
553 /* Unhook pending transmits from this filehandle. */
554 mutex_lock(&adap->lock);
555 while (!list_empty(&fh->xfer_list)) {
556 struct cec_data *data =
557 list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
559 data->blocking = false;
561 list_del(&data->xfer_list);
563 mutex_unlock(&adap->lock);
564 while (!list_empty(&fh->msgs)) {
565 struct cec_msg_entry *entry =
566 list_first_entry(&fh->msgs, struct cec_msg_entry, list);
568 list_del(&entry->list);
573 cec_put_device(devnode);
574 filp->private_data = NULL;
578 const struct file_operations cec_devnode_fops = {
579 .owner = THIS_MODULE,
581 .unlocked_ioctl = cec_ioctl,
582 .release = cec_release,