/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *     Architecture specific implementation of common functions.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static int xpc_mq_node = NUMA_NO_NODE;

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
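
/*
 * A note on the two GRU message queues above: the small "activate" queue
 * carries partition activation/deactivation and channel-control traffic,
 * while the larger "notify" queue carries the actual data-bearing channel
 * messages. Per the size macros, each queue is dimensioned to hold four
 * messages per possible partition.
 */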

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0)
		return mq->irq;

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
	uv_teardown_irq(mq->irq);
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
}
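
/*
 * Create and initialize a GRU message queue: allocate node-local pages for
 * the queue, register them with the BIOS watchlist so GRU deliveries raise
 * an IRQ, hook up the IRQ handler, hand the memory to the GRU, and finally
 * open the memory protections so other partitions can write into it. The
 * out_N labels below unwind these steps in reverse order on failure.
 */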

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
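
/*
 * Send a message via the GRU, retrying indefinitely on transient failures:
 * a full destination queue is retried after a short sleep, link congestion
 * is retried immediately, and any other error is treated as fatal and
 * reported to the caller as xpGruSendMqError.
 */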
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
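
/*
 * Handle one message arriving on the activate message queue. Partition
 * state changes are not acted on here: activate/deactivate requests are
 * recorded in act_state_req and handed off to the heartbeat checker, while
 * channel-control messages just set the corresponding chctl flag and wake
 * the channel manager.
 */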

static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? letting the heartbeat checker deal with it?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
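
/*
 * Interrupt handler for the activate message queue: drain all queued
 * messages, then wake the heartbeat checker once if any of them requested
 * a partition state change.
 */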

static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      part_referenced,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
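
/*
 * Send a message to another partition's activate message queue. The remote
 * queue's GRU descriptor is fetched and cached on first use (or again
 * whenever the cached copy has been invalidated); if the send fails because
 * the descriptor went stale, it is re-fetched and the send retried.
 */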

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_ATOMIC);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
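
/*
 * Heartbeat support. On UV the heartbeat is just a counter whose global
 * physical address is published via the reserved page; remote partitions
 * pull it with xp_remote_memcpy(). The allow/disallow hooks are no-ops
 * here. A partition is considered dead when its counter stops advancing
 * while it is not marked offline.
 */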

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}
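
/*
 * Simple spinlock-protected singly-linked FIFO, used below for the
 * per-channel lists of free send slots and of received-but-undelivered
 * messages.
 */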

static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
		 (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
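
/*
 * Allocate the per-channel message slot arrays. If a kzalloc() of the
 * requested size fails, retry with progressively fewer entries and shrink
 * the channel's nentries to whatever finally fits.
 */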

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	lockdep_assert_held(&ch->lock);

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
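
/*
 * Deliver the notify callout for a sent message. Ownership of the callout
 * is claimed by atomically NULLing msg_slot->func with cmpxchg(), so a
 * racing xpc_notify_senders_of_disconnect_uv() and the ACK path cannot
 * both invoke it.
 */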

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
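
/*
 * Handle one message from the notify message queue. A header-only message
 * (hdr.size == 0) is an ACK for a message we sent earlier; anything else
 * is a payload that gets copied into the channel's recv_msg_slots and
 * queued for delivery by a kthread.
 */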

static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
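
/*
 * Send a payload on a channel: claim a free send slot, copy the payload
 * into an on-stack buffer behind a notify_mq header carrying the slot
 * number, and push the message into the remote partition's notify queue.
 * The slot is reclaimed when the remote side ACKs it.
 */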

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
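
/*
 * The uv-flavored implementations above are published to the common XPC
 * code through this ops table (assigned to xpc_arch_ops in xpc_init_uv()).
 */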

static const struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
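
/*
 * Allocate both message queues on the given node, trying each of the
 * node's CPUs in turn until queue creation succeeds on one of them.
 */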

static int
xpc_init_mq_node(int nid)
{
	int cpu;

	cpus_read_lock();

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_activate_mq_uv =
			xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
					     XPC_ACTIVATE_IRQ_NAME,
					     xpc_handle_activate_IRQ_uv);
		if (!IS_ERR(xpc_activate_mq_uv))
			break;
	}
	if (IS_ERR(xpc_activate_mq_uv)) {
		cpus_read_unlock();
		return PTR_ERR(xpc_activate_mq_uv);
	}

	for_each_cpu(cpu, cpumask_of_node(nid)) {
		xpc_notify_mq_uv =
			xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
					     XPC_NOTIFY_IRQ_NAME,
					     xpc_handle_notify_IRQ_uv);
		if (!IS_ERR(xpc_notify_mq_uv))
			break;
	}
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		cpus_read_unlock();
		return PTR_ERR(xpc_notify_mq_uv);
	}

	cpus_read_unlock();
	return 0;
}

int
xpc_init_uv(void)
{
	int nid;
	int ret = 0;

	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	if (xpc_mq_node < 0)
		for_each_online_node(nid) {
			ret = xpc_init_mq_node(nid);

			if (!ret)
				break;
		}
	else
		ret = xpc_init_mq_node(xpc_mq_node);

	if (ret < 0)
		dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
			-ret);

	return ret;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}

module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");