path: root/sysutils/fusefs-kmod/files/extra-patch-9-fuse_vnops.c
blob: 2f49f897050bd62a86aff306813eae0a3314819a
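
This extra patch adapts fs/fuse/fuse_vnops.c to the older kernel KPIs of
FreeBSD 9.x (per the "extra-patch-9" file name); patch(1) skips this
leading commentary. Hunk by hunk:

 - vrecycle() takes an explicit thread argument on 9.x, hence
   vrecycle(vp, curthread).
 - cache_lookup() uses the three-argument form on 9.x; the two extra
   timestamp arguments were added in later releases.
 - VM object locking is mutex-based on 9.x, so the rwlock macros
   VM_OBJECT_WLOCK()/VM_OBJECT_WUNLOCK() become
   VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK().
 - vm_page_set_valid_range() is spelled vm_page_set_valid() on 9.x.
 - vm_page_readahead_finish() does not exist on 9.x, so the getpages
   path open-codes its activate/deactivate/wakeup handling (see the
   sketch after the last hunk).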
--- fs/fuse/fuse_vnops.c.orig
+++ fs/fuse/fuse_vnops.c
@@ -579,7 +579,7 @@ fuse_vnop_inactive(struct vop_inactive_args *ap)
 	}
 
 	if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
-		vrecycle(vp);
+		vrecycle(vp, curthread);
 	}
 	return 0;
 }
@@ -706,7 +706,7 @@ fuse_vnop_lookup(struct vop_lookup_args *ap)
 		op = FUSE_GETATTR;
 		goto calldaemon;
 	} else if (fuse_lookup_cache_enable) {
-		err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
+		err = cache_lookup(dvp, vpp, cnp);
 		switch (err) {
 
 		case -1:		/* positive match */
@@ -1758,7 +1758,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 	 * can only occur at the file EOF.
 	 */
 
-	VM_OBJECT_WLOCK(vp->v_object);
+	VM_OBJECT_LOCK(vp->v_object);
 	fuse_vm_page_lock_queues();
 	if (pages[ap->a_reqpage]->valid != 0) {
 		for (i = 0; i < npages; ++i) {
@@ -1769,11 +1769,11 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 			}
 		}
 		fuse_vm_page_unlock_queues();
-		VM_OBJECT_WUNLOCK(vp->v_object);
+		VM_OBJECT_UNLOCK(vp->v_object);
 		return 0;
 	}
 	fuse_vm_page_unlock_queues();
-	VM_OBJECT_WUNLOCK(vp->v_object);
+	VM_OBJECT_UNLOCK(vp->v_object);
 
 	/*
 	 * We use only the kva address for the buffer, but this is extremely
@@ -1803,7 +1803,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 
 	if (error && (uio.uio_resid == count)) {
 		FS_DEBUG("error %d\n", error);
-		VM_OBJECT_WLOCK(vp->v_object);
+		VM_OBJECT_LOCK(vp->v_object);
 		fuse_vm_page_lock_queues();
 		for (i = 0; i < npages; ++i) {
 			if (i != ap->a_reqpage) {
@@ -1813,7 +1813,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 			}
 		}
 		fuse_vm_page_unlock_queues();
-		VM_OBJECT_WUNLOCK(vp->v_object);
+		VM_OBJECT_UNLOCK(vp->v_object);
 		return VM_PAGER_ERROR;
 	}
 	/*
@@ -1823,7 +1823,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 	 */
 
 	size = count - uio.uio_resid;
-	VM_OBJECT_WLOCK(vp->v_object);
+	VM_OBJECT_LOCK(vp->v_object);
 	fuse_vm_page_lock_queues();
 	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
 		vm_page_t m;
@@ -1843,7 +1843,7 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 			 * Read operation filled a partial page.
 			 */
 			m->valid = 0;
-			vm_page_set_valid_range(m, 0, size - toff);
+			vm_page_set_valid(m, 0, size - toff);
 			KASSERT(m->dirty == 0,
 			    ("fuse_getpages: page %p is dirty", m));
 		} else {
@@ -1854,11 +1854,39 @@ fuse_vnop_getpages(struct vop_getpages_args *ap)
 			 */
 			;
 		}
-		if (i != ap->a_reqpage)
-			vm_page_readahead_finish(m);
+		if (i != ap->a_reqpage) {
+			/*
+			 * Whether or not to leave the page activated is up in
+			 * the air, but we should put the page on a page queue
+			 * somewhere (it is already in the object).  Empirical
+			 * results show that deactivating the page is usually
+			 * the best choice.
+			 */
+
+			/*
+			 * In case someone was waiting on this page, tell
+			 * them that it is now OK to use.
+			 */
+			if (!error) {
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
+					vm_page_activate(m);
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
+					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
+				vm_page_wakeup(m);
+			} else {
+				vm_page_lock(m);
+				vm_page_free(m);
+				vm_page_unlock(m);
+			}
+		}
 	}
 	fuse_vm_page_unlock_queues();
-	VM_OBJECT_WUNLOCK(vp->v_object);
+	VM_OBJECT_UNLOCK(vp->v_object);
 	return 0;
 }
 
@@ -1947,9 +1975,9 @@ fuse_vnop_putpages(struct vop_putpages_args *ap)
 
 		for (i = 0; i < nwritten; i++) {
 			rtvals[i] = VM_PAGER_OK;
-			VM_OBJECT_WLOCK(pages[i]->object);
+			VM_OBJECT_LOCK(pages[i]->object);
 			vm_page_undirty(pages[i]);
-			VM_OBJECT_WUNLOCK(pages[i]->object);
+			VM_OBJECT_UNLOCK(pages[i]->object);
 		}
 	}
 	return rtvals[0];
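
A note on the last getpages hunk, kept outside the hunks so the diff
still applies: the open-coded block is essentially what the newer
vm_page_readahead_finish() helper does. A condensed sketch of the
pattern, assuming the 9.x page KPIs used above (vm_page_lock(),
VPO_WANTED, vm_page_wakeup() from <vm/vm_page.h>); the function name is
illustrative only, not a symbol in the patch or the kernel:

	/*
	 * Retire one read-ahead page that is not the requested page.
	 * Caller holds the vnode's VM object lock, as in the loop above.
	 */
	static void
	fuse_readahead_finish_sketch(vm_page_t m, int error)
	{
		if (!error) {
			vm_page_lock(m);
			if (m->oflags & VPO_WANTED)
				vm_page_activate(m);	/* a thread sleeps on it */
			else
				vm_page_deactivate(m);	/* empirically the best queue */
			vm_page_unlock(m);
			vm_page_wakeup(m);	/* clear VPO_BUSY, wake sleepers */
		} else {
			/* Short read or I/O error: free the invalid page. */
			vm_page_lock(m);
			vm_page_free(m);
			vm_page_unlock(m);
		}
	}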