{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *replacement_map = NULL;
+ struct map *map_ref, *replacement_map = NULL;
struct machine *machine;
bool is_64_bit;
int err, fd;
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
+ /*
+ * Update addresses of vmlinux map. Re-insert it to ensure maps are
+ * correctly ordered. Do this before using maps__merge_in() for the
+ * remaining maps so vmlinux gets split if necessary.
+ */
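+ /* Hold an extra reference so the map survives its removal from kmaps */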
+ map_ref = map__get(map);
+ maps__remove(kmaps, map_ref);
+
+ map__set_start(map_ref, map__start(replacement_map));
+ map__set_end(map_ref, map__end(replacement_map));
+ map__set_pgoff(map_ref, map__pgoff(replacement_map));
+ map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
+
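+ /* Re-insert the updated map; kmaps takes its own reference on insert */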
+ err = maps__insert(kmaps, map_ref);
+ map__put(map_ref);
+ if (err)
+ goto out_err;
+
/* Add new maps */
while (!list_empty(&md.maps)) {
struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
struct map *new_map = new_node->map;
list_del_init(&new_node->node);
- if (RC_CHK_EQUAL(new_map, replacement_map)) {
- struct map *map_ref;
-
- /* Ensure maps are correctly ordered */
- map_ref = map__get(map);
- maps__remove(kmaps, map_ref);
-
- map__set_start(map_ref, map__start(new_map));
- map__set_end(map_ref, map__end(new_map));
- map__set_pgoff(map_ref, map__pgoff(new_map));
- map__set_mapping_type(map_ref, map__mapping_type(new_map));
-
- err = maps__insert(kmaps, map_ref);
- map__put(map_ref);
- map__put(new_map);
- if (err)
- goto out_err;
- } else {
+ /* Skip replacement_map, which was already updated and inserted above */
+ if (!RC_CHK_EQUAL(new_map, replacement_map)) {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)