}
 }
 
+/* Hash the administrator-selected fields of the outermost headers.
+ * Returns 0 when no outer field is enabled.  *p_has_inner is set to
+ * whether an encapsulation header was encountered, so the caller can
+ * decide whether an inner dissection is worthwhile.
+ */
+static u32 rt6_multipath_custom_hash_outer(const struct net *net,
+                                          const struct sk_buff *skb,
+                                          bool *p_has_inner)
+{
+       u32 fields = ip6_multipath_hash_fields(net);
+       struct flow_keys keys, hash_keys;
+
+       /* Skip the dissection entirely when no outer field is selected. */
+       if (!(fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+               return 0;
+
+       memset(&hash_keys, 0, sizeof(hash_keys));
+       /* Stop at the first encapsulation header; inner headers are
+        * covered by the separate inner hash.
+        */
+       skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
+
+       hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+               hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+               hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+               hash_keys.basic.ip_proto = keys.basic.ip_proto;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+               hash_keys.tags.flow_label = keys.tags.flow_label;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+               hash_keys.ports.src = keys.ports.src;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+               hash_keys.ports.dst = keys.ports.dst;
+
+       /* Report whether the dissector ran into an encapsulation. */
+       *p_has_inner = (keys.control.flags & FLOW_DIS_ENCAPSULATION) != 0;
+       return flow_hash_from_keys(&hash_keys);
+}
+
+/* Hash the administrator-selected fields of the encapsulated (inner)
+ * headers.  Returns 0 when the packet carries no encapsulation or no
+ * inner field is enabled.
+ */
+static u32 rt6_multipath_custom_hash_inner(const struct net *net,
+                                          const struct sk_buff *skb,
+                                          bool has_inner)
+{
+       u32 fields = ip6_multipath_hash_fields(net);
+       struct flow_keys keys, hash_keys;
+
+       /* The outer dissection already told us whether an encapsulation
+        * was encountered; without one there is no point in dissecting
+        * again.  Likewise bail out when no inner field is selected.
+        */
+       if (!has_inner || !(fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
+               return 0;
+
+       memset(&hash_keys, 0, sizeof(hash_keys));
+       /* Full dissection this time, past the encapsulation header. */
+       skb_flow_dissect_flow_keys(skb, &keys, 0);
+
+       if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
+               return 0;
+
+       switch (keys.control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+               if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+                       hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+               if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+                       hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+               break;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+               if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
+                       hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
+               if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
+                       hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
+               if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
+                       hash_keys.tags.flow_label = keys.tags.flow_label;
+               break;
+       }
+
+       if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
+               hash_keys.basic.ip_proto = keys.basic.ip_proto;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
+               hash_keys.ports.src = keys.ports.src;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
+               hash_keys.ports.dst = keys.ports.dst;
+
+       return flow_hash_from_keys(&hash_keys);
+}
+
+/* Combine the outer and inner custom flow hashes for an skb. */
+static u32 rt6_multipath_custom_hash_skb(const struct net *net,
+                                        const struct sk_buff *skb)
+{
+       /* Default to true: when no outer field is selected the outer
+        * helper returns early without touching has_inner, and the inner
+        * helper must then perform its own dissection.
+        */
+       bool has_inner = true;
+       u32 outer_hash, inner_hash;
+
+       outer_hash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
+       inner_hash = rt6_multipath_custom_hash_inner(net, skb, has_inner);
+
+       return jhash_2words(outer_hash, inner_hash, 0);
+}
+
+/* Hash the administrator-selected outer fields taken from a flowi6
+ * instead of a packet.  Used on the output path when no skb is
+ * available.  Returns 0 when no outer field is enabled.
+ */
+static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
+                                        const struct flowi6 *fl6)
+{
+       u32 fields = ip6_multipath_hash_fields(net);
+       struct flow_keys hash_keys;
+
+       /* No outer field selected - nothing to feed into the hash. */
+       if (!(fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
+               return 0;
+
+       memset(&hash_keys, 0, sizeof(hash_keys));
+       hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
+               hash_keys.addrs.v6addrs.src = fl6->saddr;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
+               hash_keys.addrs.v6addrs.dst = fl6->daddr;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
+               hash_keys.basic.ip_proto = fl6->flowi6_proto;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
+               hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
+       if (fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
+               hash_keys.ports.src = fl6->fl6_sport;
+       if (fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
+               hash_keys.ports.dst = fl6->fl6_dport;
+
+       return flow_hash_from_keys(&hash_keys);
+}
+
 /* if skb is set it will be used and fl6 can be NULL */
 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
                       const struct sk_buff *skb, struct flow_keys *flkeys)
                }
                mhash = flow_hash_from_keys(&hash_keys);
                break;
+       case 3:
+               if (skb)
+                       mhash = rt6_multipath_custom_hash_skb(net, skb);
+               else
+                       mhash = rt6_multipath_custom_hash_fl6(net, fl6);
+               break;
        }
 
        return mhash >> 1;