$FreeBSD$

This patch works around POSIX thread implementation differences between
FreeBSD's libthr and Linux's NPTL:

- We do not support static allocation of mutexes and condition variables.
Thus, we must initialize them explicitly with pthread_mutex_init(3) and
pthread_cond_init(3), respectively.

- We must initialize the mutex before calling pthread_cond_wait(3).
Otherwise, it fails with EINVAL.

- We must lock the mutex before calling pthread_cond_wait(3).  Otherwise,
it fails with EPERM.

- We must join threads via pthread_join(3) after calling pthread_cancel(3).
Otherwise, we may destroy a mutex or condition variable that is still in
use.  A short sketch of the resulting pattern follows this list.
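
For reference, here is a minimal, self-contained sketch (not part of the
patch) of the pattern the hunks below follow: explicit initialization,
holding the mutex across pthread_cond_wait(3), and joining the cancelled
thread before destroying anything.  The identifiers queue_mutex,
queue_cond, worker and unlock_mutex are invented for this example and do
not appear in the plugin source; the cancellation cleanup handler is
standard POSIX practice added for correctness of the sketch, while the
plugin code itself relies on an explicit unlock and the join in
NP_Shutdown.

#include <pthread.h>

/* Explicitly initialized in main(); no static initializers. */
static pthread_mutex_t queue_mutex;
static pthread_cond_t  queue_cond;

/* Release the mutex if the thread is cancelled while blocked in
 * pthread_cond_wait(3), which wakes up holding the mutex. */
static void unlock_mutex(void *m)
{
    pthread_mutex_unlock((pthread_mutex_t *) m);
}

static void *worker(void *arg)
{
    (void) arg;
    for (;;) {
        /* The mutex must be initialized and locked before the wait,
         * or libthr fails with EINVAL/EPERM. */
        pthread_mutex_lock(&queue_mutex);
        pthread_cleanup_push(unlock_mutex, &queue_mutex);
        pthread_cond_wait(&queue_cond, &queue_mutex);
        pthread_cleanup_pop(1);        /* runs unlock_mutex() */
        pthread_testcancel();
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;

    pthread_mutex_init(&queue_mutex, NULL);
    pthread_cond_init(&queue_cond, NULL);

    pthread_create(&tid, NULL, worker, NULL);
    pthread_cancel(tid);
    /* Join before destroying, so the worker cannot still be using
     * the mutex or condition variable. */
    pthread_join(tid, NULL);

    pthread_mutex_destroy(&queue_mutex);
    pthread_cond_destroy(&queue_cond);
    return 0;
}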

--- icedtea6-1.9/plugin/icedteanp/IcedTeaNPPlugin.cc.orig	2010-09-23 09:40:49.000000000 -0400
+++ icedtea6-1.9/plugin/icedteanp/IcedTeaNPPlugin.cc	2010-09-23 15:31:27.000000000 -0400
@@ -2369,6 +2369,10 @@ NP_Shutdown (void)
   pthread_cancel(plugin_request_processor_thread2);
   pthread_cancel(plugin_request_processor_thread3);
 
+  pthread_join(plugin_request_processor_thread1, NULL);
+  pthread_join(plugin_request_processor_thread2, NULL);
+  pthread_join(plugin_request_processor_thread3, NULL);
+
   java_to_plugin_bus->unSubscribe(plugin_req_proc);
   plugin_to_java_bus->unSubscribe(java_req_proc);
   //internal_bus->unSubscribe(java_req_proc);
--- icedtea6-1.9/plugin/icedteanp/IcedTeaPluginRequestProcessor.cc.orig	2010-09-23 09:40:49.000000000 -0400
+++ icedtea6-1.9/plugin/icedteanp/IcedTeaPluginRequestProcessor.cc	2010-09-23 15:36:38.000000000 -0400
@@ -63,6 +63,12 @@ PluginRequestProcessor::PluginRequestPro
     this->pendingRequests = new std::map<pthread_t, uintmax_t>();
 
     internal_req_ref_counter = 0;
+
+    pthread_mutex_init(&message_queue_mutex, NULL);
+    pthread_mutex_init(&syn_write_mutex, NULL);
+    pthread_mutex_init(&tc_mutex, NULL);
+
+    pthread_cond_init(&cond_message_available, NULL);
 }
 
 /**
@@ -77,6 +83,12 @@ PluginRequestProcessor::~PluginRequestPr
 
     if (pendingRequests)
         delete pendingRequests;
+
+    pthread_mutex_destroy(&message_queue_mutex);
+    pthread_mutex_destroy(&syn_write_mutex);
+    pthread_mutex_destroy(&tc_mutex);
+
+    pthread_cond_destroy(&cond_message_available);
 }
 
 /**
@@ -709,10 +721,12 @@ queue_processor(void* data)
     PluginRequestProcessor* processor = (PluginRequestProcessor*) data;
     std::vector<std::string*>* message_parts = NULL;
     std::string command;
-    pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER; // This is needed for API compat. and is unused
+    pthread_mutex_t wait_mutex = PTHREAD_MUTEX_INITIALIZER;
 
     PLUGIN_DEBUG("Queue processor initialized. Queue = %p\n", message_queue);
 
+    pthread_mutex_init(&wait_mutex, NULL);
+
     while (true)
     {
         pthread_mutex_lock(&message_queue_mutex);
@@ -780,13 +794,17 @@ queue_processor(void* data)
 
         } else
         {
-        	pthread_cond_wait(&cond_message_available, &wait_mutex);
-            pthread_testcancel();
+	    pthread_mutex_lock(&wait_mutex);
+	    pthread_cond_wait(&cond_message_available, &wait_mutex);
+	    pthread_mutex_unlock(&wait_mutex);
+	    pthread_testcancel();
         }
 
         message_parts = NULL;
     }
 
+    pthread_mutex_destroy(&wait_mutex);
+
     PLUGIN_DEBUG("Queue processing stopped.\n");
 }