nvidia-340xx/0013-kernel-6.3.patch
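
Since Linux 6.3, vma->vm_flags is a read-only field that may only be updated through the vm_flags_set()/vm_flags_clear() helpers, so the direct "vma->vm_flags |= ..." and "&= ~..." writes in the legacy 340xx module no longer compile. This patch wraps each flag update in kernel/nv-mmap.c and kernel/uvm/nvidia_uvm_lite.c in a LINUX_VERSION_CODE check, using the new helpers on 6.3 and later kernels and keeping the original assignments on older ones.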

--- a/kernel/nv-mmap.c 2023-05-03 11:48:48.481814709 +0200
+++ b/kernel/nv-mmap.c 2023-05-03 11:51:39.220313386 +0200
@@ -312,7 +312,11 @@
goto done;
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_set(vma, VM_IO);
+#else
vma->vm_flags |= VM_IO;
+#endif
}
else
{
@@ -363,8 +367,13 @@
NV_PRINT_AT(NV_DBG_MEMINFO, at);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_set(vma, VM_IO | VM_LOCKED | VM_RESERVED);
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+#else
vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED);
vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+#endif
}
if (status == 0)
@@ -374,8 +383,13 @@
if ((prot & NV_PROTECT_WRITEABLE) == 0)
{
vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_clear(vma, VM_WRITE);
+ vm_flags_clear(vma, VM_MAYWRITE);
+#else
vma->vm_flags &= ~VM_WRITE;
vma->vm_flags &= ~VM_MAYWRITE;
+#endif
}
vma->vm_ops = &nv_vm_ops;
--- a/kernel/uvm/nvidia_uvm_lite.c 2023-05-03 12:00:17.508256190 +0200
+++ b/kernel/uvm/nvidia_uvm_lite.c 2023-05-03 12:03:59.160897071 +0200
@@ -1524,10 +1524,17 @@
vma->vm_ops = &uvmlite_vma_ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ // Prohibit copying the vma on fork().
+ vm_flags_set(vma, VM_DONTCOPY);
+ // Prohibit mremap() that would expand the vma.
+ vm_flags_set(vma, VM_DONTEXPAND);
+#else
// Prohibit copying the vma on fork().
vma->vm_flags |= VM_DONTCOPY;
// Prohibt mremap() that would expand the vma.
vma->vm_flags |= VM_DONTEXPAND;
+#endif
// Other cases of vma modification are detected in _mmap_open().
@@ -1546,9 +1553,15 @@
return -EINVAL;
vma->vm_ops = &counters_vma_ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_clear(vma, VM_MAYWRITE);
+ // prevent vm_insert_page from modifying the vma's flags:
+ vm_flags_set(vma, VM_MIXEDMAP);
+#else
vma->vm_flags &= ~VM_MAYWRITE;
// prevent vm_insert_page from modifying the vma's flags:
vma->vm_flags |= VM_MIXEDMAP;
+#endif
ret = 0;
}
UVM_DBG_PRINT_RL("vma 0x%p [0x%p, 0x%p) ret %d pgoff"
@@ -2527,8 +2540,13 @@
// Subsequent access from userspace after the pages are unmapped will cause
// a SIGSEGV.
//
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_clear(vma, VM_READ|VM_MAYREAD);
+ vm_flags_clear(vma, VM_WRITE|VM_MAYWRITE);
+#else
vma->vm_flags &= ~(VM_READ|VM_MAYREAD);
vma->vm_flags &= ~(VM_WRITE|VM_MAYWRITE);
+#endif
}
//
@@ -2536,8 +2554,13 @@
//
static void _set_vma_accessible(struct vm_area_struct * vma)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
+ vm_flags_set(vma, VM_READ|VM_MAYREAD);
+ vm_flags_set(vma, VM_WRITE|VM_MAYWRITE);
+#else
vma->vm_flags |= (VM_READ|VM_MAYREAD);
vma->vm_flags |= (VM_WRITE|VM_MAYWRITE);
+#endif
}
//
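
For reference, a minimal compatibility sketch, assuming hypothetical NV_VM_FLAGS_SET/NV_VM_FLAGS_CLEAR names that are not part of this patch or of the NVIDIA sources: out-of-tree modules often centralize the 6.3 accessor switch in one header instead of repeating the #if at every call site, which is what the hunks above do inline.

/*
 * Sketch only: hypothetical helpers, not shipped by the 340xx driver.
 * Since Linux 6.3, vma->vm_flags is const and must be changed through
 * vm_flags_set()/vm_flags_clear(); earlier kernels allow direct writes.
 */
#include <linux/version.h>
#include <linux/mm.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 3, 0)
#define NV_VM_FLAGS_SET(vma, flags)   vm_flags_set((vma), (flags))
#define NV_VM_FLAGS_CLEAR(vma, flags) vm_flags_clear((vma), (flags))
#else
#define NV_VM_FLAGS_SET(vma, flags)   ((vma)->vm_flags |= (flags))
#define NV_VM_FLAGS_CLEAR(vma, flags) ((vma)->vm_flags &= ~(flags))
#endif

/* Example call sites, equivalent to the hunks above: */
/*   NV_VM_FLAGS_SET(vma, VM_IO);           */
/*   NV_VM_FLAGS_CLEAR(vma, VM_MAYWRITE);   */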