@@ -26,7 +26,7 @@
 
 import operator
 import re
-from typing import Callable, Iterable, Iterator, List, NamedTuple, Optional
+from typing import Callable, Iterable, Iterator, List, NamedTuple, Optional, Tuple
 
 from _drgn import (
     _linux_helper_direct_mapping_offset,
@@ -35,19 +35,23 @@
 )
 from drgn import (
     NULL,
+    Architecture,
     IntegerLike,
     Object,
     ObjectAbsentError,
+    ObjectNotFoundError,
     Program,
     TypeKind,
     cast,
     container_of,
 )
 from drgn.helpers.common.format import decode_enum_type_flags
 from drgn.helpers.common.prog import takes_program_or_default
+from drgn.helpers.linux.bitops import for_each_set_bit
 from drgn.helpers.linux.device import bus_for_each_dev
 from drgn.helpers.linux.list import list_for_each_entry
 from drgn.helpers.linux.mapletree import mt_for_each, mtree_load
+from drgn.helpers.linux.mmzone import _highest_present_section_nr, _section_flags
 from drgn.helpers.linux.percpu import percpu_counter_sum, percpu_counter_sum_positive
 from drgn.helpers.linux.pid import for_each_task_in_group
 from drgn.helpers.linux.rbtree import rb_find
@@ -74,6 +78,7 @@
     "follow_phys",
     "for_each_memory_block",
     "for_each_page",
+    "for_each_valid_page_range",
     "for_each_vma",
     "for_each_vmap_area",
     "get_page_flags",
@@ -910,6 +915,153 @@ def for_each_page(prog: Program) -> Iterator[Object]:
         yield page0 + i
 
 
+def _for_each_valid_page_range_flatmem(
+    prog: Program,
+) -> Iterator[Tuple[int, int, Object]]:
+    mem_map = _page0(prog)
+
+    if (
+        prog.platform.arch  # type: ignore[union-attr]  # platform can't be None
+        == Architecture.ARM
+    ):
+        # Since Linux kernel commit a4d5613c4dc6 ("arm: extend pfn_valid to
+        # take into account freed memory map alignment") (in v5.14), Arm's
+        # pfn_valid() checks that the PFN lies within a pageblock that
+        # intersects a present memory chunk. However, pageblock_nr_pages is a
+        # macro, and it's not easy to get its value. So, we get as close as we
+        # can and ignore the extra pages granted by the pageblock alignment
+        # (which is also what the kernel did before Linux kernel commit
+        # 09414d00a137 ("ARM: only consider memblocks with NOMAP cleared for
+        # linear mapping") (in v4.5)).
+        page_shift = prog["PAGE_SHIFT"].value_()
+        memory = prog["memblock"].memory
+        prev_start = prev_end = None
+        for region in memory.regions[: memory.cnt]:
+            start = region.base.value_()
+            end = (start + region.size.value_()) >> page_shift
+            start >>= page_shift
+            if start == prev_end:
+                prev_end = end  # Merge adjacent regions.
+            else:
+                if prev_start is not None:
+                    yield prev_start, prev_end, mem_map
+                prev_start = start
+                prev_end = end
+        if prev_start is not None:
+            yield prev_start, prev_end, mem_map  # type: ignore  # prev_end can't be None
+        return
+
+    # Generic FLATMEM validity.
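+    # With FLATMEM, the generic pfn_valid() accepts any PFN satisfying
+    # pfn >= ARCH_PFN_OFFSET and pfn - ARCH_PFN_OFFSET < max_mapnr, which is
+    # exactly one contiguous range.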
+    start_pfn = prog["ARCH_PFN_OFFSET"].value_()
+    yield start_pfn, start_pfn + prog["max_mapnr"].value_(), mem_map
+
+
+@takes_program_or_default
+def for_each_valid_page_range(prog: Program) -> Iterator[Tuple[int, int, Object]]:
+    """
+    Iterate over every contiguous range of valid page frame numbers and
+    ``struct page``\\ s.
+
+    >>> for start_pfn, end_pfn, mem_map in for_each_valid_page_range():
+    ...     pages = mem_map[start_pfn:end_pfn]
+
+    :return: Iterator of (``start_pfn``, ``end_pfn``, ``mem_map``) tuples.
+        ``start_pfn`` is the minimum page frame number (PFN) in the range
+        (inclusive). ``end_pfn`` is the maximum PFN in the range (exclusive).
+        ``mem_map`` is a ``struct page *`` object such that ``mem_map[pfn]`` is
+        the ``struct page`` for the given PFN.
+    """
+    try:
+        mem_section = prog["mem_section"]
+    except ObjectNotFoundError:
+        yield from _for_each_valid_page_range_flatmem(prog)
+        return
+
+    # To support SPARSEMEM without SPARSEMEM_VMEMMAP, we will need to check
+    # whether each section's mem_map is contiguous.
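+    # (With SPARSEMEM_VMEMMAP, the memory map is virtually contiguous, so
+    # vmemmap can be indexed by any valid PFN and a single base pointer works
+    # for every range.)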
+    mem_map = prog["vmemmap"].read_()
+
+    PAGE_SHIFT = prog["PAGE_SHIFT"].value_()
+    SECTIONS_PER_ROOT = prog["SECTIONS_PER_ROOT"].value_()
+    SECTION_SIZE_BITS = prog["SECTION_SIZE_BITS"].value_()
+    PAGES_PER_SECTION = 1 << (SECTION_SIZE_BITS - PAGE_SHIFT)
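+    # SUBSECTION_SHIFT is a fixed constant in the kernel rather than a
+    # variable, so it can't be read from debug info; each subsection covers
+    # 2 MiB of physical address space.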
+    SUBSECTION_SHIFT = 21
+    SUBSECTIONS_PER_SECTION = 1 << (SECTION_SIZE_BITS - SUBSECTION_SHIFT)
+    PAGES_PER_SUBSECTION = 1 << (SUBSECTION_SHIFT - PAGE_SHIFT)
+    flags = _section_flags(prog)
+    SECTION_HAS_MEM_MAP = flags["SECTION_HAS_MEM_MAP"]
+    SECTION_IS_EARLY = flags["SECTION_IS_EARLY"]
+
+    highest_present_section_nr = _highest_present_section_nr(prog)
+    nr_roots = highest_present_section_nr // SECTIONS_PER_ROOT + 1
+
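+    # With CONFIG_SPARSEMEM_EXTREME, mem_section is an array of pointers to
+    # dynamically allocated roots of SECTIONS_PER_ROOT sections each (and,
+    # since Linux 4.15, a pointer to that array); otherwise, it is a static
+    # two-dimensional array of sections. Handle all three layouts.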
+    unaliased_type = mem_section.type_.unaliased()
+    if unaliased_type.kind == TypeKind.POINTER:
+        mem_section = mem_section.read_()
+        if not mem_section:
+            return
+
+    root_kind = unaliased_type.type.unaliased_kind()
+
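+    # pfn is the first PFN of the root/section currently being examined;
+    # start_pfn is the first PFN of the contiguous valid range being
+    # accumulated, or None if no range is open.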
+    pfn = 0
+    start_pfn = None
+    for root_nr, root in enumerate(mem_section[:nr_roots]):
+        if root_kind == TypeKind.POINTER:
+            root = root.read_()
+            if not root:
+                if start_pfn is not None:
+                    yield start_pfn, pfn, mem_map
+                    start_pfn = None
+                pfn += SECTIONS_PER_ROOT * PAGES_PER_SECTION
+                continue
+
+        if root_nr == nr_roots - 1:
+            nr_sections = highest_present_section_nr % SECTIONS_PER_ROOT + 1
+        else:
+            nr_sections = SECTIONS_PER_ROOT
+        for section in root[:nr_sections]:
+            mem_map_value = section.section_mem_map.value_()
+            # Open-coded valid_section() and early_section() to avoid some
+            # overhead.
+            if mem_map_value & SECTION_HAS_MEM_MAP:
+                # struct mem_section::usage only exists since Linux kernel
+                # commit f1eca35a0dc7 ("mm/sparsemem: introduce struct
+                # mem_section_usage") (in v5.3). Additionally, struct
+                # mem_section_usage::subsection_map only exists for
+                # CONFIG_SPARSEMEM_VMEMMAP. Without both, as well as for early
+                # sections, validity has section granularity.
+                subsection_map = None
+                if not (mem_map_value & SECTION_IS_EARLY):
+                    try:
+                        subsection_map = section.usage.subsection_map
+                    except AttributeError:
+                        pass
+
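+                # Without a subsection map, the whole section is valid, so
+                # just make sure a range is open. With one, walk its set bits
+                # and merge runs of consecutive valid subsections: end_bit is
+                # one past the current run, so a gap (bit != end_bit) closes
+                # any open range and starts a new one.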
+                if subsection_map is None:
+                    if start_pfn is None:
+                        start_pfn = pfn
+                else:
+                    end_bit = None if start_pfn is None else 0
+                    for bit in for_each_set_bit(
+                        subsection_map, SUBSECTIONS_PER_SECTION
+                    ):
+                        if bit != end_bit:
+                            if start_pfn is not None:
+                                yield start_pfn, pfn + end_bit * PAGES_PER_SUBSECTION, mem_map
+                            start_pfn = pfn + bit * PAGES_PER_SUBSECTION
+                        end_bit = bit + 1
+                    if end_bit != SUBSECTIONS_PER_SECTION and start_pfn is not None:
+                        yield start_pfn, pfn + end_bit * PAGES_PER_SUBSECTION, mem_map
+                        start_pfn = None
+            elif start_pfn is not None:
+                yield start_pfn, pfn, mem_map
+                start_pfn = None
+            pfn += PAGES_PER_SECTION
+
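+    # Close the range left open by the last present section, if any.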
+    if start_pfn is not None:
+        yield start_pfn, pfn, mem_map
+
+
 @takes_program_or_default
 def PFN_PHYS(prog: Program, pfn: IntegerLike) -> Object:
     """