2 * Copyright (c) 2012, Microsoft Corporation.
5 * K. Y. Srinivasan <kys@microsoft.com>
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/mman.h>
24 #include <linux/delay.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/kthread.h>
29 #include <linux/completion.h>
30 #include <linux/memory_hotplug.h>
31 #include <linux/memory.h>
32 #include <linux/notifier.h>
33 #include <linux/percpu_counter.h>
35 #include <linux/hyperv.h>
38 * We begin with definitions supporting the Dynamic Memory protocol
41 * Begin protocol definitions.
47 * Protocol versions. The low word is the minor version, the high word the major version.
52 * Changed to 0.1 on 2009/03/25
53 * Changed to 0.2 on 2009/05/14
54 * Changed to 0.3 on 2009/12/03
55 * Changed to 1.0 on 2011/04/05
58 #define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
59 #define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
60 #define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
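/*
 * Illustrative sketch, not part of the driver: how the packing macros above
 * combine and split a protocol version. The function name is made up for the
 * example; the block is compiled out.
 */
#if 0	/* example only */
static void dynmem_version_example(void)
{
	__u32 ver = DYNMEM_MAKE_VERSION(2, 0);	/* 0x00020000 */

	pr_info("major %u, minor %u\n",
		DYNMEM_MAJOR_VERSION(ver),	/* 2 */
		DYNMEM_MINOR_VERSION(ver));	/* 0 */
}
#endif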
63 DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
64 DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
65 DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
67 DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
68 DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
69 DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
71 DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
80 enum dm_message_type {
85 DM_VERSION_REQUEST = 1,
86 DM_VERSION_RESPONSE = 2,
87 DM_CAPABILITIES_REPORT = 3,
88 DM_CAPABILITIES_RESPONSE = 4,
90 DM_BALLOON_REQUEST = 6,
91 DM_BALLOON_RESPONSE = 7,
92 DM_UNBALLOON_REQUEST = 8,
93 DM_UNBALLOON_RESPONSE = 9,
94 DM_MEM_HOT_ADD_REQUEST = 10,
95 DM_MEM_HOT_ADD_RESPONSE = 11,
96 DM_VERSION_03_MAX = 11,
100 DM_INFO_MESSAGE = 12,
101 DM_VERSION_1_MAX = 12
106 * Structures defining the dynamic memory management
124 * To support guests that may have alignment
125 * limitations on hot-add, the guest can specify
126 * its alignment requirements; a value of n
127 * represents an alignment of 2^n in megabytes.
129 __u64 hot_add_alignment:4;
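/*
 * Illustrative example: an encoded value of 7 means an alignment of
 * 2^7 MB = 128 MB, which is what this driver reports to the host in
 * balloon_probe().
 */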
135 union dm_mem_page_range {
138 * The PFN number of the first page in the range.
139 * 40 bits is the architectural limit of a PFN
144 * The number of pages in the range.
154 * The header for all dynamic memory messages:
156 * type: Type of the message.
157 * size: Size of the message in bytes, including the header.
158 * trans_id: The guest is responsible for manufacturing this ID.
168 * A generic message format for dynamic memory.
169 * Specific message formats are defined later in the file.
173 struct dm_header hdr;
174 __u8 data[]; /* enclosed message */
179 * Specific message types supporting the dynamic memory protocol.
183 * Version negotiation message. Sent from the guest to the host.
184 * The guest is free to try different versions until the host
185 * accepts the version.
187 * dm_version: The protocol version requested.
188 * is_last_attempt: If TRUE, this is the last version guest will request.
189 * reservedz: Reserved field, set to zero.
192 struct dm_version_request {
193 struct dm_header hdr;
194 union dm_version version;
195 __u32 is_last_attempt:1;
200 * Version response message; sent from the host to the guest, indicating
201 * whether the host has accepted the version sent by the guest.
203 * is_accepted: If TRUE, host has accepted the version and the guest
204 * should proceed to the next stage of the protocol. FALSE indicates that
205 * guest should re-try with a different version.
207 * reservedz: Reserved field, set to zero.
210 struct dm_version_response {
211 struct dm_header hdr;
217 * Message reporting capabilities. This is sent from the guest to the
221 struct dm_capabilities {
222 struct dm_header hdr;
225 __u64 max_page_number;
229 * Response to the capabilities message. This is sent from the host to the
230 * guest. This message notifies if the host has accepted the guest's
231 * capabilities. If the host has not accepted, the guest must shut down
234 * is_accepted: Indicates if the host has accepted guest's capabilities.
235 * reservedz: Must be 0.
238 struct dm_capabilities_resp_msg {
239 struct dm_header hdr;
245 * This message is used to report memory pressure from the guest.
246 * This message is not part of any transaction and there is no
247 * response to this message.
249 * num_avail: Available memory in pages.
250 * num_committed: Committed memory in pages.
251 * page_file_size: The accumulated size of all page files
252 * in the system in pages.
253 * zero_free: The number of zero and free pages.
254 * page_file_writes: The writes to the page file in pages.
255 * io_diff: An indicator of file cache efficiency or page file activity,
256 * calculated as File Cache Page Fault Count - Page Read Count.
257 * This value is in pages.
259 * Some of these metrics are Windows specific and fortunately
260 * the algorithm on the host side that computes the guest memory
261 * pressure only uses the num_committed value.
265 struct dm_header hdr;
268 __u64 page_file_size;
270 __u32 page_file_writes;
276 * Message to ask the guest to allocate memory - balloon up message.
277 * This message is sent from the host to the guest. The guest may not be
278 * able to allocate as much memory as requested.
280 * num_pages: number of pages to allocate.
284 struct dm_header hdr;
291 * Balloon response message; this message is sent from the guest
292 * to the host in response to the balloon message.
294 * reservedz: Reserved; must be set to zero.
295 * more_pages: If FALSE, this is the last message of the transaction.
296 * if TRUE, there will be at least one more message from the guest.
298 * range_count: The number of ranges in the range array.
300 * range_array: An array of page ranges returned to the host.
304 struct dm_balloon_response {
305 struct dm_header hdr;
308 __u32 range_count:31;
309 union dm_mem_page_range range_array[];
313 * Un-balloon message; this message is sent from the host
314 * to the guest to give the guest more memory.
316 * more_pages: If FALSE, this is the last message of the transaction.
317 * if TRUE, there will be at least one more message from the guest.
319 * reservedz: Reserved; must be set to zero.
321 * range_count: The number of ranges in the range array.
323 * range_array: An array of page ranges returned to the host.
327 struct dm_unballoon_request {
328 struct dm_header hdr;
332 union dm_mem_page_range range_array[];
336 * Un-balloon response message; this message is sent from the guest
337 * to the host in response to an unballoon request.
341 struct dm_unballoon_response {
342 struct dm_header hdr;
347 * Hot add request message. Message sent from the host to the guest.
349 * mem_range: Memory range to hot add.
351 * On Linux we currently don't support this since we cannot hot add
352 * memory at arbitrary granularity.
356 struct dm_header hdr;
357 union dm_mem_page_range range;
361 * Hot add response message.
362 * This message is sent by the guest to report the status of a hot add request.
363 * If page_count is less than the requested page count, then the host should
364 * assume all further hot add requests will fail, since this indicates that
365 * the guest has hit an upper physical memory barrier.
367 * Hot adds may also fail due to low resources; in this case, the guest must
368 * not complete this message until the hot add can succeed, and the host must
369 * not send a new hot add request until the response is sent.
370 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
371 * times, it fails the request.
374 * page_count: number of pages that were successfully hot added.
376 * result: Result of the operation; 1: success, 0: failure.
380 struct dm_hot_add_response {
381 struct dm_header hdr;
387 * Types of information sent from host to the guest.
391 INFO_TYPE_MAX_PAGE_CNT = 0,
397 * Header for the information message.
400 struct dm_info_header {
401 enum dm_info_type type;
406 * This message is sent from the host to the guest to pass
407 * some relevant information (win8 addition).
410 * info_size: size of the information blob.
411 * info: information blob.
415 struct dm_header hdr;
422 * End protocol definitions.
426 * State to manage hot adding memory into the guest.
427 * The range start_pfn : end_pfn specifies the range
428 * that the host has asked us to hot add. The range
429 * start_pfn : ha_end_pfn specifies the range that we have
430 * currently hot added. We hot add in multiples of 128M
431 * chunks; it is possible that we may not be able to bring
432 * online all the pages in the region. The range
433 * covered_start_pfn:covered_end_pfn defines the pages that can
437 struct hv_hotadd_state {
438 struct list_head list;
439 unsigned long start_pfn;
440 unsigned long covered_start_pfn;
441 unsigned long covered_end_pfn;
442 unsigned long ha_end_pfn;
443 unsigned long end_pfn;
447 struct list_head gap_list;
450 struct hv_hotadd_gap {
451 struct list_head list;
452 unsigned long start_pfn;
453 unsigned long end_pfn;
456 struct balloon_state {
458 struct work_struct wrk;
462 union dm_mem_page_range ha_page_range;
463 union dm_mem_page_range ha_region_range;
464 struct work_struct wrk;
467 static bool hot_add = true;
468 static bool do_hot_add;
470 * Delay reporting memory pressure by
471 * the specified number of seconds.
473 static uint pressure_report_delay = 45;
476 * The last time we posted a pressure report to host.
478 static unsigned long last_post_time;
480 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
481 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
483 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
484 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
485 static atomic_t trans_id = ATOMIC_INIT(0);
487 static int dm_ring_size = (5 * PAGE_SIZE);
490 * Driver specific state.
503 static __u8 recv_buffer[PAGE_SIZE];
504 static __u8 *send_buffer;
505 #define PAGES_IN_2M 512
506 #define HA_CHUNK (32 * 1024)
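/*
 * Note (illustrative): HA_CHUNK is expressed in pages, so 32 * 1024 pages is
 * 128 MiB assuming 4 KiB pages, matching the 128 MB hot-add alignment
 * advertised to the host.
 */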
508 struct hv_dynmem_device {
509 struct hv_device *dev;
510 enum hv_dm_state state;
511 struct completion host_event;
512 struct completion config_event;
515 * Number of pages we have currently ballooned out.
517 unsigned int num_pages_ballooned;
518 unsigned int num_pages_onlined;
519 unsigned int num_pages_added;
522 * State to manage the ballooning (up) operation.
524 struct balloon_state balloon_wrk;
527 * State to execute the "hot-add" operation.
529 struct hot_add_wrk ha_wrk;
532 * This state tracks if the host has specified a hot-add
535 bool host_specified_ha_region;
538 * State to synchronize hot-add.
540 struct completion ol_waitevent;
543 * This thread handles hot-add
544 * requests from the host as well as notifying
545 * the host with regard to memory pressure in
548 struct task_struct *thread;
551 * Protects ha_region_list, num_pages_onlined counter and individual
552 * regions from ha_region_list.
557 * A list of hot-add regions.
559 struct list_head ha_region_list;
562 * We start with the highest version we can support
563 * and downgrade based on the host; we save here the
564 * next version to try.
569 * The negotiated version agreed by host.
574 static struct hv_dynmem_device dm_device;
576 static void post_status(struct hv_dynmem_device *dm);
578 #ifdef CONFIG_MEMORY_HOTPLUG
579 static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
582 struct memory_notify *mem = (struct memory_notify *)v;
587 case MEM_CANCEL_ONLINE:
588 if (dm_device.ha_waiting) {
589 dm_device.ha_waiting = false;
590 complete(&dm_device.ol_waitevent);
595 spin_lock_irqsave(&dm_device.ha_lock, flags);
596 dm_device.num_pages_onlined -= mem->nr_pages;
597 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
599 case MEM_GOING_ONLINE:
600 case MEM_GOING_OFFLINE:
601 case MEM_CANCEL_OFFLINE:
607 static struct notifier_block hv_memory_nb = {
608 .notifier_call = hv_memory_notifier,
612 /* Check whether the page is backed and can be onlined; if so, online it. */
613 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
615 unsigned long cur_start_pgp;
616 unsigned long cur_end_pgp;
617 struct hv_hotadd_gap *gap;
619 cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
620 cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
622 /* The page is not backed. */
623 if (((unsigned long)pg < cur_start_pgp) ||
624 ((unsigned long)pg >= cur_end_pgp))
627 /* Check for gaps. */
628 list_for_each_entry(gap, &has->gap_list, list) {
629 cur_start_pgp = (unsigned long)
630 pfn_to_page(gap->start_pfn);
631 cur_end_pgp = (unsigned long)
632 pfn_to_page(gap->end_pfn);
633 if (((unsigned long)pg >= cur_start_pgp) &&
634 ((unsigned long)pg < cur_end_pgp)) {
639 /* This frame is currently backed; online the page. */
640 __online_page_set_limits(pg);
641 __online_page_increment_counters(pg);
642 __online_page_free(pg);
644 WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
645 dm_device.num_pages_onlined++;
648 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
649 unsigned long start_pfn, unsigned long size)
653 pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
654 for (i = 0; i < size; i++)
655 hv_page_online_one(has, pfn_to_page(start_pfn + i));
658 static void hv_mem_hot_add(unsigned long start, unsigned long size,
659 unsigned long pfn_count,
660 struct hv_hotadd_state *has)
664 unsigned long start_pfn;
665 unsigned long processed_pfn;
666 unsigned long total_pfn = pfn_count;
669 for (i = 0; i < (size/HA_CHUNK); i++) {
670 start_pfn = start + (i * HA_CHUNK);
672 spin_lock_irqsave(&dm_device.ha_lock, flags);
673 has->ha_end_pfn += HA_CHUNK;
675 if (total_pfn > HA_CHUNK) {
676 processed_pfn = HA_CHUNK;
677 total_pfn -= HA_CHUNK;
679 processed_pfn = total_pfn;
683 has->covered_end_pfn += processed_pfn;
684 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
686 init_completion(&dm_device.ol_waitevent);
687 dm_device.ha_waiting = !memhp_auto_online;
689 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
690 ret = add_memory(nid, PFN_PHYS((start_pfn)),
691 (HA_CHUNK << PAGE_SHIFT));
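/*
 * Note (illustrative): each add_memory() call above spans one HA_CHUNK,
 * i.e. 32768 pages, or 128 MiB assuming 4 KiB pages.
 */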
694 pr_warn("hot_add memory failed, error is %d\n", ret);
695 if (ret == -EEXIST) {
697 * This error indicates that the failure
698 * is not transient. This is the
699 * case where the guest's physical address map
700 * precludes hot adding memory. Stop all further
705 spin_lock_irqsave(&dm_device.ha_lock, flags);
706 has->ha_end_pfn -= HA_CHUNK;
707 has->covered_end_pfn -= processed_pfn;
708 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
713 * Wait for the memory block to be onlined when memory onlining
714 * is done outside of the kernel (memhp_auto_online). Since the hot
715 * add has succeeded, it is ok to proceed even if the pages in
716 * the hot added region have not been "onlined" within the
719 if (dm_device.ha_waiting)
720 wait_for_completion_timeout(&dm_device.ol_waitevent,
722 post_status(&dm_device);
726 static void hv_online_page(struct page *pg)
728 struct hv_hotadd_state *has;
729 unsigned long cur_start_pgp;
730 unsigned long cur_end_pgp;
733 spin_lock_irqsave(&dm_device.ha_lock, flags);
734 list_for_each_entry(has, &dm_device.ha_region_list, list) {
735 cur_start_pgp = (unsigned long)
736 pfn_to_page(has->start_pfn);
737 cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
739 /* The page belongs to a different HAS. */
740 if (((unsigned long)pg < cur_start_pgp) ||
741 ((unsigned long)pg >= cur_end_pgp))
744 hv_page_online_one(has, pg);
747 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
750 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
752 struct hv_hotadd_state *has;
753 struct hv_hotadd_gap *gap;
754 unsigned long residual, new_inc;
758 spin_lock_irqsave(&dm_device.ha_lock, flags);
759 list_for_each_entry(has, &dm_device.ha_region_list, list) {
761 * If the pfn range we are dealing with is not in the current
762 * "hot add block", move on.
764 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
768 * If the current start pfn is not where the covered_end
769 * is, create a gap and update covered_end_pfn.
771 if (has->covered_end_pfn != start_pfn) {
772 gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
778 INIT_LIST_HEAD(&gap->list);
779 gap->start_pfn = has->covered_end_pfn;
780 gap->end_pfn = start_pfn;
781 list_add_tail(&gap->list, &has->gap_list);
783 has->covered_end_pfn = start_pfn;
787 * If the current hot-add request extends beyond
788 * our current limit, extend it.
790 if ((start_pfn + pfn_cnt) > has->end_pfn) {
791 residual = (start_pfn + pfn_cnt - has->end_pfn);
793 * Extend the region by multiples of HA_CHUNK.
795 new_inc = (residual / HA_CHUNK) * HA_CHUNK;
796 if (residual % HA_CHUNK)
799 has->end_pfn += new_inc;
805 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
810 static unsigned long handle_pg_range(unsigned long pg_start,
811 unsigned long pg_count)
813 unsigned long start_pfn = pg_start;
814 unsigned long pfn_cnt = pg_count;
816 struct hv_hotadd_state *has;
817 unsigned long pgs_ol = 0;
818 unsigned long old_covered_state;
819 unsigned long res = 0, flags;
821 pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
824 spin_lock_irqsave(&dm_device.ha_lock, flags);
825 list_for_each_entry(has, &dm_device.ha_region_list, list) {
827 * If the pfn range we are dealing with is not in the current
828 * "hot add block", move on.
830 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
833 old_covered_state = has->covered_end_pfn;
835 if (start_pfn < has->ha_end_pfn) {
837 * This is the case where we are backing pages
838 * in an already hot added region. Bring
839 * these pages online first.
841 pgs_ol = has->ha_end_pfn - start_pfn;
842 if (pgs_ol > pfn_cnt)
845 has->covered_end_pfn += pgs_ol;
848 * Check if the corresponding memory block is already
849 * online. It is possible to observe struct pages still
850 * being uninitialized here so check section instead.
851 * In case the section is online, we need to bring the
852 * rest of the pfns (which were not backed previously)
855 if (start_pfn > has->start_pfn &&
856 online_section_nr(pfn_to_section_nr(start_pfn)))
857 hv_bring_pgs_online(has, start_pfn, pgs_ol);
861 if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
863 * We have some residual hot add range
864 * that needs to be hot added; hot add
865 * it now. Hot add a multiple of
866 * HA_CHUNK that fully covers the pages
869 size = (has->end_pfn - has->ha_end_pfn);
870 if (pfn_cnt <= size) {
871 size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
872 if (pfn_cnt % HA_CHUNK)
877 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
878 hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
879 spin_lock_irqsave(&dm_device.ha_lock, flags);
882 * If we managed to online any pages that were given to us,
883 * we declare success.
885 res = has->covered_end_pfn - old_covered_state;
888 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
893 static unsigned long process_hot_add(unsigned long pg_start,
894 unsigned long pfn_cnt,
895 unsigned long rg_start,
896 unsigned long rg_size)
898 struct hv_hotadd_state *ha_region = NULL;
905 if (!dm_device.host_specified_ha_region) {
906 covered = pfn_covered(pg_start, pfn_cnt);
915 * If the host has specified a hot-add range, deal with it first.
919 ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
923 INIT_LIST_HEAD(&ha_region->list);
924 INIT_LIST_HEAD(&ha_region->gap_list);
926 ha_region->start_pfn = rg_start;
927 ha_region->ha_end_pfn = rg_start;
928 ha_region->covered_start_pfn = pg_start;
929 ha_region->covered_end_pfn = pg_start;
930 ha_region->end_pfn = rg_start + rg_size;
932 spin_lock_irqsave(&dm_device.ha_lock, flags);
933 list_add_tail(&ha_region->list, &dm_device.ha_region_list);
934 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
939 * Process the page range specified, bringing the pages
940 * online if possible.
942 return handle_pg_range(pg_start, pfn_cnt);
947 static void hot_add_req(struct work_struct *dummy)
949 struct dm_hot_add_response resp;
950 #ifdef CONFIG_MEMORY_HOTPLUG
951 unsigned long pg_start, pfn_cnt;
952 unsigned long rg_start, rg_sz;
954 struct hv_dynmem_device *dm = &dm_device;
956 memset(&resp, 0, sizeof(struct dm_hot_add_response));
957 resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
958 resp.hdr.size = sizeof(struct dm_hot_add_response);
960 #ifdef CONFIG_MEMORY_HOTPLUG
961 pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
962 pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
964 rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
965 rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
967 if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
968 unsigned long region_size;
969 unsigned long region_start;
972 * The host has not specified the hot-add region.
973 * Based on the hot-add page range being specified,
974 * compute a hot-add region that can cover the pages
975 * that need to be hot-added while ensuring the alignment
976 * and size requirements of Linux as they relate to hot-add.
978 region_start = pg_start;
979 region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
980 if (pfn_cnt % HA_CHUNK)
981 region_size += HA_CHUNK;
983 region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
985 rg_start = region_start;
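/*
 * Worked example (illustrative, made-up numbers): with HA_CHUNK = 0x8000,
 * pg_start = 0x52000 and pfn_cnt = 0x9000, region_start is rounded down to
 * 0x50000 and region_size is rounded up to 2 * HA_CHUNK = 0x10000 pages.
 */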
990 resp.page_count = process_hot_add(pg_start, pfn_cnt,
993 dm->num_pages_added += resp.page_count;
996 * The result field of the response structure has the
997 * following semantics:
999 * 1. If all or some pages hot-added: Guest should return success.
1001 * 2. If no pages could be hot-added:
1003 * If the guest returns success, then the host
1004 * will not attempt any further hot-add operations. This
1005 * signifies a permanent failure.
1007 * If the guest returns failure, then this failure will be
1008 * treated as a transient failure and the host may retry the
1009 * hot-add operation after some delay.
1011 if (resp.page_count > 0)
1013 else if (!do_hot_add)
1018 if (!do_hot_add || (resp.page_count == 0))
1019 pr_info("Memory hot add failed\n");
1021 dm->state = DM_INITIALIZED;
1022 resp.hdr.trans_id = atomic_inc_return(&trans_id);
1023 vmbus_sendpacket(dm->dev->channel, &resp,
1024 sizeof(struct dm_hot_add_response),
1025 (unsigned long)NULL,
1026 VM_PKT_DATA_INBAND, 0);
1029 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1031 struct dm_info_header *info_hdr;
1033 info_hdr = (struct dm_info_header *)msg->info;
1035 switch (info_hdr->type) {
1036 case INFO_TYPE_MAX_PAGE_CNT:
1037 if (info_hdr->data_size == sizeof(__u64)) {
1038 __u64 *max_page_count = (__u64 *)&info_hdr[1];
1040 pr_info("Max. dynamic memory size: %llu MB\n",
1041 (*max_page_count) >> (20 - PAGE_SHIFT));
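/*
 * Illustrative arithmetic: with 4 KiB pages (PAGE_SHIFT = 12) the shift
 * above is by 8, i.e. pages / 256, converting a page count to megabytes.
 */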
1046 pr_info("Received Unknown type: %d\n", info_hdr->type);
1050 static unsigned long compute_balloon_floor(void)
1052 unsigned long min_pages;
1053 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
1054 /* Simple continuous piecewise linear function:
1055 * max MiB -> min MiB gradient
1065 if (totalram_pages < MB2PAGES(128))
1066 min_pages = MB2PAGES(8) + (totalram_pages >> 1);
1067 else if (totalram_pages < MB2PAGES(512))
1068 min_pages = MB2PAGES(40) + (totalram_pages >> 2);
1069 else if (totalram_pages < MB2PAGES(2048))
1070 min_pages = MB2PAGES(104) + (totalram_pages >> 3);
1071 else if (totalram_pages < MB2PAGES(8192))
1072 min_pages = MB2PAGES(232) + (totalram_pages >> 4);
1074 min_pages = MB2PAGES(488) + (totalram_pages >> 5);
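/*
 * Worked example (illustrative): on a 4096 MiB guest,
 * totalram_pages = MB2PAGES(4096), so the floor is
 * MB2PAGES(232) + MB2PAGES(4096) / 16 = MB2PAGES(488),
 * i.e. roughly 488 MiB are never ballooned out.
 */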
1080 * Post our status, as it relates to memory pressure, to the
1081 * host. The host expects the guest to post this status
1082 * periodically at 1 second intervals.
1084 * The metrics specified in this protocol are very Windows
1085 * specific and so we cook up numbers here to convey our memory
1089 static void post_status(struct hv_dynmem_device *dm)
1091 struct dm_status status;
1092 unsigned long now = jiffies;
1093 unsigned long last_post = last_post_time;
1095 if (pressure_report_delay > 0) {
1096 --pressure_report_delay;
1100 if (!time_after(now, (last_post_time + HZ)))
1103 memset(&status, 0, sizeof(struct dm_status));
1104 status.hdr.type = DM_STATUS_REPORT;
1105 status.hdr.size = sizeof(struct dm_status);
1106 status.hdr.trans_id = atomic_inc_return(&trans_id);
1109 * The host expects the guest to report free and committed memory.
1110 * Furthermore, the host expects the pressure information to include
1111 * the ballooned out pages. For a given amount of memory that we are
1112 * managing we need to compute a floor below which we should not
1113 * balloon. Compute this and add it to the pressure report.
1114 * We also need to report all offline pages (num_pages_added -
1115 * num_pages_onlined) as committed to the host, otherwise it can try
1116 * asking us to balloon them out.
1118 status.num_avail = si_mem_available();
1119 status.num_committed = vm_memory_committed() +
1120 dm->num_pages_ballooned +
1121 (dm->num_pages_added > dm->num_pages_onlined ?
1122 dm->num_pages_added - dm->num_pages_onlined : 0) +
1123 compute_balloon_floor();
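/*
 * Illustrative example (made-up numbers): with 1000 pages from
 * vm_memory_committed(), 200 pages ballooned out, 50 hot-added pages not
 * yet onlined and a floor of 300 pages, the guest reports
 * num_committed = 1000 + 200 + 50 + 300 = 1550 pages.
 */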
1126 * If our transaction ID is no longer current, just don't
1127 * send the status. This can happen if we were interrupted
1128 * after we picked our transaction ID.
1130 if (status.hdr.trans_id != atomic_read(&trans_id))
1134 * If the last post time that we sampled has changed,
1135 * we have raced; don't post the status.
1137 if (last_post != last_post_time)
1140 last_post_time = jiffies;
1141 vmbus_sendpacket(dm->dev->channel, &status,
1142 sizeof(struct dm_status),
1143 (unsigned long)NULL,
1144 VM_PKT_DATA_INBAND, 0);
1148 static void free_balloon_pages(struct hv_dynmem_device *dm,
1149 union dm_mem_page_range *range_array)
1151 int num_pages = range_array->finfo.page_cnt;
1152 __u64 start_frame = range_array->finfo.start_page;
1156 for (i = 0; i < num_pages; i++) {
1157 pg = pfn_to_page(i + start_frame);
1159 dm->num_pages_ballooned--;
1165 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1166 unsigned int num_pages,
1167 struct dm_balloon_response *bl_resp,
1173 for (i = 0; i < num_pages / alloc_unit; i++) {
1174 if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1176 return i * alloc_unit;
1179 * We execute this code in a thread context. Furthermore,
1180 * we don't want the kernel to try too hard.
1182 pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1183 __GFP_NOMEMALLOC | __GFP_NOWARN,
1184 get_order(alloc_unit << PAGE_SHIFT));
1187 return i * alloc_unit;
1189 dm->num_pages_ballooned += alloc_unit;
1192 * If we allocated 2M pages, split them so we
1193 * can free them in any order they are returned.
1196 if (alloc_unit != 1)
1197 split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1199 bl_resp->range_count++;
1200 bl_resp->range_array[i].finfo.start_page =
1202 bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1203 bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1207 return i * alloc_unit;
1210 static void balloon_up(struct work_struct *dummy)
1212 unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1213 unsigned int num_ballooned = 0;
1214 struct dm_balloon_response *bl_resp;
1220 unsigned long floor;
1223 * We will attempt 2M allocations. However, if we fail to
1224 * allocate 2M chunks, we will go back to 4k allocations.
1228 avail_pages = si_mem_available();
1229 floor = compute_balloon_floor();
1231 /* Refuse to balloon below the floor. */
1232 if (avail_pages < num_pages || avail_pages - num_pages < floor) {
1233 pr_info("Balloon request will be partially fulfilled. %s\n",
1234 avail_pages < num_pages ? "Not enough memory." :
1235 "Balloon floor reached.");
1237 num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
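/*
 * Worked example (illustrative, made-up numbers): with avail_pages = 10000,
 * floor = 8000 and a request for 5000 pages, the request is trimmed to
 * 10000 - 8000 = 2000 pages.
 */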
1241 bl_resp = (struct dm_balloon_response *)send_buffer;
1242 memset(send_buffer, 0, PAGE_SIZE);
1243 bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1244 bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1245 bl_resp->more_pages = 1;
1247 num_pages -= num_ballooned;
1248 num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1249 bl_resp, alloc_unit);
1251 if (alloc_unit != 1 && num_ballooned == 0) {
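/*
 * Note: per the comment above the loop, a failed 2M-sized allocation pass
 * falls back to 4k allocations (alloc_unit = 1) before retrying.
 */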
1256 if (num_ballooned == 0 || num_ballooned == num_pages) {
1257 pr_debug("Ballooned %u out of %u requested pages.\n",
1258 num_pages, dm_device.balloon_wrk.num_pages);
1260 bl_resp->more_pages = 0;
1262 dm_device.state = DM_INITIALIZED;
1266 * We are pushing a lot of data through the channel;
1267 * deal with transient failures caused by the
1268 * lack of space in the ring buffer.
1272 bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1273 ret = vmbus_sendpacket(dm_device.dev->channel,
1276 (unsigned long)NULL,
1277 VM_PKT_DATA_INBAND, 0);
1281 post_status(&dm_device);
1282 } while (ret == -EAGAIN);
1286 * Free up the memory we allocated.
1288 pr_info("Balloon response failed\n");
1290 for (i = 0; i < bl_resp->range_count; i++)
1291 free_balloon_pages(&dm_device,
1292 &bl_resp->range_array[i]);
1300 static void balloon_down(struct hv_dynmem_device *dm,
1301 struct dm_unballoon_request *req)
1303 union dm_mem_page_range *range_array = req->range_array;
1304 int range_count = req->range_count;
1305 struct dm_unballoon_response resp;
1307 unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
1309 for (i = 0; i < range_count; i++) {
1310 free_balloon_pages(dm, &range_array[i]);
1311 complete(&dm_device.config_event);
1314 pr_debug("Freed %u ballooned pages.\n",
1315 prev_pages_ballooned - dm->num_pages_ballooned);
1317 if (req->more_pages == 1)
1320 memset(&resp, 0, sizeof(struct dm_unballoon_response));
1321 resp.hdr.type = DM_UNBALLOON_RESPONSE;
1322 resp.hdr.trans_id = atomic_inc_return(&trans_id);
1323 resp.hdr.size = sizeof(struct dm_unballoon_response);
1325 vmbus_sendpacket(dm_device.dev->channel, &resp,
1326 sizeof(struct dm_unballoon_response),
1327 (unsigned long)NULL,
1328 VM_PKT_DATA_INBAND, 0);
1330 dm->state = DM_INITIALIZED;
1333 static void balloon_onchannelcallback(void *context);
1335 static int dm_thread_func(void *dm_dev)
1337 struct hv_dynmem_device *dm = dm_dev;
1339 while (!kthread_should_stop()) {
1340 wait_for_completion_interruptible_timeout(
1341 &dm_device.config_event, 1*HZ);
1343 * The host expects us to post information on the memory
1344 * pressure every second.
1346 reinit_completion(&dm_device.config_event);
1354 static void version_resp(struct hv_dynmem_device *dm,
1355 struct dm_version_response *vresp)
1357 struct dm_version_request version_req;
1360 if (vresp->is_accepted) {
1362 * We are done; wake up the
1363 * context waiting for version
1366 complete(&dm->host_event);
1370 * If there are more versions to try, continue
1371 * with negotiations; if not,
1372 * shut down the service since we are not able
1373 * to negotiate a suitable version number
1376 if (dm->next_version == 0)
1379 memset(&version_req, 0, sizeof(struct dm_version_request));
1380 version_req.hdr.type = DM_VERSION_REQUEST;
1381 version_req.hdr.size = sizeof(struct dm_version_request);
1382 version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1383 version_req.version.version = dm->next_version;
1384 dm->version = version_req.version.version;
1387 * Set the next version to try in case current version fails.
1388 * Win7 protocol ought to be the last one to try.
1390 switch (version_req.version.version) {
1391 case DYNMEM_PROTOCOL_VERSION_WIN8:
1392 dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1393 version_req.is_last_attempt = 0;
1396 dm->next_version = 0;
1397 version_req.is_last_attempt = 1;
1400 ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1401 sizeof(struct dm_version_request),
1402 (unsigned long)NULL,
1403 VM_PKT_DATA_INBAND, 0);
1411 dm->state = DM_INIT_ERROR;
1412 complete(&dm->host_event);
1415 static void cap_resp(struct hv_dynmem_device *dm,
1416 struct dm_capabilities_resp_msg *cap_resp)
1418 if (!cap_resp->is_accepted) {
1419 pr_info("Capabilities not accepted by host\n");
1420 dm->state = DM_INIT_ERROR;
1422 complete(&dm->host_event);
1425 static void balloon_onchannelcallback(void *context)
1427 struct hv_device *dev = context;
1430 struct dm_message *dm_msg;
1431 struct dm_header *dm_hdr;
1432 struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1433 struct dm_balloon *bal_msg;
1434 struct dm_hot_add *ha_msg;
1435 union dm_mem_page_range *ha_pg_range;
1436 union dm_mem_page_range *ha_region;
1438 memset(recv_buffer, 0, sizeof(recv_buffer));
1439 vmbus_recvpacket(dev->channel, recv_buffer,
1440 PAGE_SIZE, &recvlen, &requestid);
1443 dm_msg = (struct dm_message *)recv_buffer;
1444 dm_hdr = &dm_msg->hdr;
1446 switch (dm_hdr->type) {
1447 case DM_VERSION_RESPONSE:
1449 (struct dm_version_response *)dm_msg);
1452 case DM_CAPABILITIES_RESPONSE:
1454 (struct dm_capabilities_resp_msg *)dm_msg);
1457 case DM_BALLOON_REQUEST:
1458 if (dm->state == DM_BALLOON_UP)
1459 pr_warn("Currently ballooning\n");
1460 bal_msg = (struct dm_balloon *)recv_buffer;
1461 dm->state = DM_BALLOON_UP;
1462 dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1463 schedule_work(&dm_device.balloon_wrk.wrk);
1466 case DM_UNBALLOON_REQUEST:
1467 dm->state = DM_BALLOON_DOWN;
1469 (struct dm_unballoon_request *)recv_buffer);
1472 case DM_MEM_HOT_ADD_REQUEST:
1473 if (dm->state == DM_HOT_ADD)
1474 pr_warn("Currently hot-adding\n");
1475 dm->state = DM_HOT_ADD;
1476 ha_msg = (struct dm_hot_add *)recv_buffer;
1477 if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1479 * This is a normal hot-add request specifying
1482 dm->host_specified_ha_region = false;
1483 ha_pg_range = &ha_msg->range;
1484 dm->ha_wrk.ha_page_range = *ha_pg_range;
1485 dm->ha_wrk.ha_region_range.page_range = 0;
1488 * Host is specifying that we first hot-add
1489 * a region and then partially populate this
1492 dm->host_specified_ha_region = true;
1493 ha_pg_range = &ha_msg->range;
1494 ha_region = &ha_pg_range[1];
1495 dm->ha_wrk.ha_page_range = *ha_pg_range;
1496 dm->ha_wrk.ha_region_range = *ha_region;
1498 schedule_work(&dm_device.ha_wrk.wrk);
1501 case DM_INFO_MESSAGE:
1502 process_info(dm, (struct dm_info_msg *)dm_msg);
1506 pr_err("Unhandled message: type: %d\n", dm_hdr->type);
1513 static int balloon_probe(struct hv_device *dev,
1514 const struct hv_vmbus_device_id *dev_id)
1518 struct dm_version_request version_req;
1519 struct dm_capabilities cap_msg;
1521 #ifdef CONFIG_MEMORY_HOTPLUG
1522 do_hot_add = hot_add;
1528 * First allocate a send buffer.
1531 send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1535 ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1536 balloon_onchannelcallback, dev);
1541 dm_device.dev = dev;
1542 dm_device.state = DM_INITIALIZING;
1543 dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1544 init_completion(&dm_device.host_event);
1545 init_completion(&dm_device.config_event);
1546 INIT_LIST_HEAD(&dm_device.ha_region_list);
1547 spin_lock_init(&dm_device.ha_lock);
1548 INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1549 INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1550 dm_device.host_specified_ha_region = false;
1553 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1554 if (IS_ERR(dm_device.thread)) {
1555 ret = PTR_ERR(dm_device.thread);
1559 #ifdef CONFIG_MEMORY_HOTPLUG
1560 set_online_page_callback(&hv_online_page);
1561 register_memory_notifier(&hv_memory_nb);
1564 hv_set_drvdata(dev, &dm_device);
1566 * Initiate the handshake with the host and negotiate
1567 * a version that the host can support. We start with the
1568 * highest version number and go down if the host cannot
1571 memset(&version_req, 0, sizeof(struct dm_version_request));
1572 version_req.hdr.type = DM_VERSION_REQUEST;
1573 version_req.hdr.size = sizeof(struct dm_version_request);
1574 version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1575 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1576 version_req.is_last_attempt = 0;
1577 dm_device.version = version_req.version.version;
1579 ret = vmbus_sendpacket(dev->channel, &version_req,
1580 sizeof(struct dm_version_request),
1581 (unsigned long)NULL,
1582 VM_PKT_DATA_INBAND, 0);
1586 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1593 * If we could not negotiate a compatible version with the host,
1594 * fail the probe function.
1596 if (dm_device.state == DM_INIT_ERROR) {
1601 pr_info("Using Dynamic Memory protocol version %u.%u\n",
1602 DYNMEM_MAJOR_VERSION(dm_device.version),
1603 DYNMEM_MINOR_VERSION(dm_device.version));
1606 * Now submit our capabilities to the host.
1608 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1609 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1610 cap_msg.hdr.size = sizeof(struct dm_capabilities);
1611 cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1613 cap_msg.caps.cap_bits.balloon = 1;
1614 cap_msg.caps.cap_bits.hot_add = 1;
1617 * Specify our alignment requirements as they relate to
1618 * memory hot-add. Specify 128MB alignment.
1620 cap_msg.caps.cap_bits.hot_add_alignment = 7;
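/*
 * 7 encodes an alignment of 2^7 MB = 128 MB, the same granularity as
 * HA_CHUNK (32768 pages, assuming 4 KiB pages).
 */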
1623 * Currently the host does not use these
1624 * values and we set them to what is done in the
1627 cap_msg.min_page_cnt = 0;
1628 cap_msg.max_page_number = -1;
1630 ret = vmbus_sendpacket(dev->channel, &cap_msg,
1631 sizeof(struct dm_capabilities),
1632 (unsigned long)NULL,
1633 VM_PKT_DATA_INBAND, 0);
1637 t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1644 * If the host does not like our capabilities,
1645 * fail the probe function.
1647 if (dm_device.state == DM_INIT_ERROR) {
1652 dm_device.state = DM_INITIALIZED;
1653 last_post_time = jiffies;
1658 #ifdef CONFIG_MEMORY_HOTPLUG
1659 restore_online_page_callback(&hv_online_page);
1661 kthread_stop(dm_device.thread);
1664 vmbus_close(dev->channel);
1670 static int balloon_remove(struct hv_device *dev)
1672 struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1673 struct hv_hotadd_state *has, *tmp;
1674 struct hv_hotadd_gap *gap, *tmp_gap;
1675 unsigned long flags;
1677 if (dm->num_pages_ballooned != 0)
1678 pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1680 cancel_work_sync(&dm->balloon_wrk.wrk);
1681 cancel_work_sync(&dm->ha_wrk.wrk);
1683 vmbus_close(dev->channel);
1684 kthread_stop(dm->thread);
1686 #ifdef CONFIG_MEMORY_HOTPLUG
1687 restore_online_page_callback(&hv_online_page);
1688 unregister_memory_notifier(&hv_memory_nb);
1690 spin_lock_irqsave(&dm_device.ha_lock, flags);
1691 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
1692 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
1693 list_del(&gap->list);
1696 list_del(&has->list);
1699 spin_unlock_irqrestore(&dm_device.ha_lock, flags);
1704 static const struct hv_vmbus_device_id id_table[] = {
1705 /* Dynamic Memory Class ID */
1706 /* 525074DC-8985-46e2-8057-A307DC18A502 */
1711 MODULE_DEVICE_TABLE(vmbus, id_table);
1713 static struct hv_driver balloon_drv = {
1714 .name = "hv_balloon",
1715 .id_table = id_table,
1716 .probe = balloon_probe,
1717 .remove = balloon_remove,
1720 static int __init init_balloon_drv(void)
1723 return vmbus_driver_register(&balloon_drv);
1726 module_init(init_balloon_drv);
1728 MODULE_DESCRIPTION("Hyper-V Balloon");
1729 MODULE_LICENSE("GPL");