Lines matching defs: has
193 * if the host has accepted the version sent by the guest.
195 * is_accepted: If TRUE, host has accepted the version and the guest
222 * guest. This message notifies if the host has accepted the guest's
223 * capabilities. If the host has not accepted, the guest must shut down
226 * is_accepted: Indicates if the host has accepted the guest's capabilities.
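Lines 193-226 describe the same handshake twice: the host answers both the guest's version offer and its capabilities report with a single is_accepted bit, and a rejected capabilities report obliges the guest to shut the service down. A minimal sketch of what such response messages could look like; the field layout and the dm_header member are illustrative assumptions, not the authoritative wire format:

/* Illustrative sketch of the two negotiation responses described
 * above; the real layouts live in the driver's protocol definitions. */
struct dm_version_response {
	struct dm_header hdr;		/* assumed common message header */
	__u64 is_accepted:1;		/* TRUE: host accepted the guest's version */
	__u64 reservedz:63;
} __packed;

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;		/* FALSE: guest must shut down the service */
	__u64 reservedz:63;
} __packed;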
352 * the guest has hit an upper physical memory barrier.
414 * that the host has asked us to hot add. The range
525 * This state tracks if the host has specified a hot-add
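Every `has` below is an instance of one per-region tracking structure. A sketch of its likely shape, inferred purely from the fields this listing touches (start_pfn, covered_start_pfn, covered_end_pfn, ha_end_pfn, end_pfn, gap_list); the comments on each field's role are assumptions:

/* Sketch of the hot-add state implied by the fields used below;
 * one entry per host-specified hot-add region. */
struct hv_hotadd_state {
	struct list_head list;		/* links into dm_device.ha_region_list */
	unsigned long start_pfn;	/* first pfn of the region */
	unsigned long covered_start_pfn; /* first pfn the host has backed */
	unsigned long covered_end_pfn;	/* end of the host-backed range */
	unsigned long ha_end_pfn;	/* end of what has been hot-added */
	unsigned long end_pfn;		/* end of the region */
	struct list_head gap_list;	/* unbacked holes inside the region */
};

struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};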
582 static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
588 if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
592 list_for_each_entry(gap, &has->gap_list, list) {
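Lines 582-592 are one predicate: a pfn counts as backed only when it lies inside the covered window and inside none of the recorded gaps. A plausible reconstruction of the full function from these fragments:

static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
				     unsigned long pfn)
{
	struct hv_hotadd_gap *gap;

	/* Outside the host-backed window: not backed. */
	if (pfn < has->covered_start_pfn || pfn >= has->covered_end_pfn)
		return false;

	/* Inside a recorded gap: not backed either. */
	list_for_each_entry(gap, &has->gap_list, list) {
		if (pfn >= gap->start_pfn && pfn < gap->end_pfn)
			return false;
	}

	return true;
}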
604 struct hv_hotadd_state *has;
613 list_for_each_entry(has, &dm_device.ha_region_list, list) {
614 while ((pfn >= has->start_pfn) &&
615 (pfn < has->end_pfn) &&
618 if (has_pfn_is_backed(has, pfn))
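Lines 604-618 walk every hot-add region and, for each region overlapping a candidate range, count the pfns that are actually backed. A sketch of that walk; the wrapper name count_backed_pfns and the missing locking are assumptions:

static unsigned long count_backed_pfns(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long pfn = start_pfn, count = 0;
	struct hv_hotadd_state *has;

	while (pfn < start_pfn + nr_pages) {
		bool found = false;

		/* Find a region covering pfn and count the backed pfns
		 * of the run it covers (lines 613-618). */
		list_for_each_entry(has, &dm_device.ha_region_list, list) {
			while ((pfn >= has->start_pfn) &&
			       (pfn < has->end_pfn) &&
			       (pfn < start_pfn + nr_pages)) {
				found = true;
				if (has_pfn_is_backed(has, pfn))
					count++;
				pfn++;
			}
		}
		/* pfn lies outside every region: skip it. */
		if (!found)
			pfn++;
	}
	return count;
}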
680 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
682 if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
696 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
703 hv_page_online_one(has, pfn_to_page(start_pfn + i));
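Lines 680-703 form the onlining pair: a single page is handed to the core only when its frame is backed, unbacked pages are left marked offline, and a range is onlined page by page. A sketch, where the PageOffline bookkeeping is an assumption about the elided lines:

static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
		/* Unbacked frame: keep the page out of the allocator. */
		if (!PageOffline(pg))
			__SetPageOffline(pg);
		return;
	}
	if (PageOffline(pg))
		__ClearPageOffline(pg);

	/* Backed frame: hand the page to the core as a 0-order block. */
	generic_online_page(pg, 0);
}

static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}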
708 struct hv_hotadd_state *has)
720 has->ha_end_pfn += ha_pages_in_chunk;
723 has->covered_end_pfn += processed_pfn;
745 has->ha_end_pfn -= ha_pages_in_chunk;
746 has->covered_end_pfn -= processed_pfn;
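Lines 708-746 show an advance-then-roll-back pattern: the chunk counters are bumped before the memory is actually added, so the online callback already sees the enlarged range, and they are wound back if the add fails. A condensed sketch; the function shape, node lookup, and hotplug flags are assumptions:

static void hv_mem_hot_add_chunk(struct hv_hotadd_state *has,
				 unsigned long processed_pfn)
{
	unsigned long start_pfn;
	int nid, ret;

	/* Advance the bookkeeping first so the memory-hotplug online
	 * callback already sees the new range (lines 720-723). */
	has->ha_end_pfn += ha_pages_in_chunk;
	has->covered_end_pfn += processed_pfn;

	start_pfn = has->ha_end_pfn - ha_pages_in_chunk;
	nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
	ret = add_memory(nid, PFN_PHYS(start_pfn),
			 PFN_PHYS(ha_pages_in_chunk), MHP_MERGE_RESOURCE);
	if (ret) {
		/* Failure: wind the counters back (lines 745-746). */
		has->ha_end_pfn -= ha_pages_in_chunk;
		has->covered_end_pfn -= processed_pfn;
	}
}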
766 struct hv_hotadd_state *has;
770 list_for_each_entry(has, &dm_device.ha_region_list, list) {
772 if (pfn < has->start_pfn ||
773 (pfn + (1UL << order) > has->end_pfn))
776 hv_bring_pgs_online(has, pfn, 1UL << order);
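Lines 766-776 locate the single region that wholly contains a 1UL << order page block and bring it online. A sketch of the surrounding callback; its name and signature are assumptions modeled on the memory-hotplug online-page hook:

static void hv_online_page(struct page *pg, unsigned int order)
{
	struct hv_hotadd_state *has;
	unsigned long pfn = page_to_pfn(pg);

	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/* The block must sit entirely inside one region. */
		if (pfn < has->start_pfn ||
		    (pfn + (1UL << order) > has->end_pfn))
			continue;

		hv_bring_pgs_online(has, pfn, 1UL << order);
		break;
	}
}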
785 struct hv_hotadd_state *has;
791 list_for_each_entry(has, &dm_device.ha_region_list, list) {
796 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
803 if (has->covered_end_pfn != start_pfn) {
811 gap->start_pfn = has->covered_end_pfn;
813 list_add_tail(&gap->list, &has->gap_list);
815 has->covered_end_pfn = start_pfn;
822 if ((start_pfn + pfn_cnt) > has->end_pfn) {
824 residual = (start_pfn + pfn_cnt - has->end_pfn);
825 has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
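Lines 785-825 handle a host request that lands inside an existing region: any hole between the region's covered end and the request start is recorded as a gap, and a request that overruns the region's end grows end_pfn by a chunk-aligned residual. A sketch stitching those fragments together; error handling, locking, and the function name are assumptions:

static int pfn_covered_sketch(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual;

	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/* Record the hole between covered_end_pfn and start_pfn. */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(*gap), GFP_ATOMIC);
			if (!gap)
				return -ENOMEM;
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/* Grow the region if the request overruns it. */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			has->end_pfn += ALIGN(residual, ha_pages_in_chunk);
		}
		return 1;
	}
	return 0;
}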
841 struct hv_hotadd_state *has;
850 list_for_each_entry(has, &dm_device.ha_region_list, list) {
855 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
858 old_covered_state = has->covered_end_pfn;
860 if (start_pfn < has->ha_end_pfn) {
866 pgs_ol = has->ha_end_pfn - start_pfn;
870 has->covered_end_pfn += pgs_ol;
880 if (start_pfn > has->start_pfn &&
882 hv_bring_pgs_online(has, start_pfn, pgs_ol);
885 if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
893 size = (has->end_pfn - has->ha_end_pfn);
900 hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
907 res = has->covered_end_pfn - old_covered_state;
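Lines 841-907 split a request into two phases: pages that were hot-added earlier but never onlined are brought online directly, then whatever remains in the region is hot-added, and the return value is how far covered_end_pfn advanced. A condensed sketch; the clamping of pgs_ol, the elided chunk rounding of size, and the control flow between the fragments are assumptions:

static unsigned long handle_pg_range_sketch(unsigned long start_pfn,
					    unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	unsigned long old_covered_state, pgs_ol, size, res = 0;

	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		/* Phase 1: already hot-added, just needs onlining. */
		if (start_pfn < has->ha_end_pfn) {
			pgs_ol = min(has->ha_end_pfn - start_pfn, pfn_cnt);
			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			if (start_pfn > has->start_pfn)
				hv_bring_pgs_online(has, start_pfn, pgs_ol);
		}

		/* Phase 2: hot-add what is left in this region
		 * (rounding size up to whole chunks is elided). */
		if (has->ha_end_pfn < has->end_pfn && pfn_cnt > 0) {
			size = has->end_pfn - has->ha_end_pfn;
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		}

		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	return res;
}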
936 * If the host has specified a hot-add range, deal with it first.
990 * The host has not specified the hot-add region.
1007 * The result field of the response structure has the
1169 * If the last post time that we sampled has changed,
2020 struct hv_hotadd_state *has, *tmp;
2035 * call has failed and some cleanup has been done as
2048 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
2049 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
2053 list_del(&has->list);
2054 kfree(has);
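Lines 2020-2054 tear the whole thing down. The _safe iterators matter here because each node is freed while its list is being walked: gaps are released first, then the region itself is unlinked and freed. Reassembled from the fragments above; the wrapper name is an assumption:

static void ha_region_list_free(struct hv_dynmem_device *dm)
{
	struct hv_hotadd_state *has, *tmp;
	struct hv_hotadd_gap *gap, *tmp_gap;

	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
		/* Empty this region's gap list before freeing it. */
		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
			list_del(&gap->list);
			kfree(gap);
		}
		list_del(&has->list);
		kfree(has);
	}
}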